hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
f8a2cb070ceb3bd56ea643c5a1e8af301a44954c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bfs_simple.cuh"
using namespace std;
#define DEBUG(x)
#define N_THREADS_PER_BLOCK (1 << 5)
__global__
void initializeDeviceArray(int n, int *d_arr, int value, int start_index) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == start_index) {
d_arr[start_index] = 0;
}
else if (tid < n) {
d_arr[tid] = value;
}
}
__global__
void printDeviceArray(int *d_arr, int n) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n) {
printf("d_arr[%i] = %i \n", tid, d_arr[tid]);
}
}
/*
* Given a graph and a current queue computes next vertices (vertex frontiers) to traverse.
*/
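/*
 * The kernel assumes a CSR-style layout: vertex v's neighbours are
 * adjacencyList[edgesOffset[v] .. edgesOffset[v] + edgesSize[v] - 1].
 * Illustrative example (made-up graph, a triangle 0-1, 0-2, 1-2):
 *   adjacencyList = {1,2, 0,2, 0,1}
 *   edgesOffset   = {0, 2, 4}
 *   edgesSize     = {2, 2, 2}
 */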
__global__
void computeNextQueue(int *adjacencyList, int *edgesOffset, int *edgesSize, int *distance,
int queueSize, int *currentQueue, int *nextQueueSize, int *nextQueue, int level) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x; // thread id
if (tid < queueSize) { // visit all vertexes in a queue in parallel
int current = currentQueue[tid];
for (int i = edgesOffset[current]; i < edgesOffset[current] + edgesSize[current]; ++i) {
int v = adjacencyList[i];
if (distance[v] == INT_MAX) {
distance[v] = level + 1;
int position = atomicAdd(nextQueueSize, 1);
nextQueue[position] = v;
}
}
}
}
void bfsGPU(int start, Graph &G, vector<int> &distance, vector<bool> &visited) {
const int n_blocks = (G.numVertices + N_THREADS_PER_BLOCK - 1) / N_THREADS_PER_BLOCK;
// Initialization of GPU variables
int *d_adjacencyList;
int *d_edgesOffset;
int *d_edgesSize;
int *d_firstQueue;
int *d_secondQueue;
int *d_nextQueueSize;
int *d_distance; // output
// Initialization of CPU variables
int currentQueueSize = 1;
const int NEXT_QUEUE_SIZE = 0;
int level = 0;
// Allocation on device
const int size = G.numVertices * sizeof(int);
const int adjacencySize = G.adjacencyList.size() * sizeof(int);
hipMalloc((void **)&d_adjacencyList, adjacencySize);
hipMalloc((void **)&d_edgesOffset, size);
hipMalloc((void **)&d_edgesSize, size);
hipMalloc((void **)&d_firstQueue, size);
hipMalloc((void **)&d_secondQueue, size);
hipMalloc((void **)&d_distance, size);
hipMalloc((void **)&d_nextQueueSize, sizeof(int));
// Copy inputs to device
hipMemcpy(d_adjacencyList, &G.adjacencyList[0], adjacencySize, hipMemcpyHostToDevice);
hipMemcpy(d_edgesOffset, &G.edgesOffset[0], size, hipMemcpyHostToDevice);
hipMemcpy(d_edgesSize, &G.edgesSize[0], size, hipMemcpyHostToDevice);
hipMemcpy(d_nextQueueSize, &NEXT_QUEUE_SIZE, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_firstQueue, &start, sizeof(int), hipMemcpyHostToDevice);
// initializeDeviceArray<<<n_blocks, N_THREADS_PER_BLOCK>>> (G.numVertices, d_distance, INT_MAX, start); // FOR SOME REASON USING THIS KERNEL DOESNT WORK
// hipDeviceSynchronize();
auto startTime = chrono::steady_clock::now();
distance = vector<int> (G.numVertices, INT_MAX);
distance[start] = 0;
hipMemcpy(d_distance, distance.data(), size, hipMemcpyHostToDevice);
while (currentQueueSize > 0) {
int *d_currentQueue;
int *d_nextQueue;
if (level % 2 == 0) {
d_currentQueue = d_firstQueue;
d_nextQueue = d_secondQueue;
}
else {
d_currentQueue = d_secondQueue;
d_nextQueue = d_firstQueue;
}
hipLaunchKernelGGL(( computeNextQueue), dim3(n_blocks), dim3(N_THREADS_PER_BLOCK), 0, 0, d_adjacencyList, d_edgesOffset, d_edgesSize, d_distance,
currentQueueSize, d_currentQueue, d_nextQueueSize, d_nextQueue, level);
hipDeviceSynchronize();
++level;
hipMemcpy(&currentQueueSize, d_nextQueueSize, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(d_nextQueueSize, &NEXT_QUEUE_SIZE, sizeof(int), hipMemcpyHostToDevice);
}
hipMemcpy(&distance[0], d_distance, size, hipMemcpyDeviceToHost);
auto endTime = std::chrono::steady_clock::now();
auto duration = chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count();
printf("Elapsed time for naive linear GPU implementation (without copying graph) : %li ms.\n", duration);
// Cleanup
hipFree(d_adjacencyList);
hipFree(d_edgesOffset);
hipFree(d_edgesSize);
hipFree(d_firstQueue);
hipFree(d_secondQueue);
hipFree(d_distance);
hipFree(d_nextQueueSize);
}
| f8a2cb070ceb3bd56ea643c5a1e8af301a44954c.cu | #include "bfs_simple.cuh"
using namespace std;
#define DEBUG(x)
#define N_THREADS_PER_BLOCK (1 << 5)
__global__
void initializeDeviceArray(int n, int *d_arr, int value, int start_index) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == start_index) {
d_arr[start_index] = 0;
}
else if (tid < n) {
d_arr[tid] = value;
}
}
__global__
void printDeviceArray(int *d_arr, int n) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n) {
printf("d_arr[%i] = %i \n", tid, d_arr[tid]);
}
}
/*
* Given a graph and a current queue computes next vertices (vertex frontiers) to traverse.
*/
__global__
void computeNextQueue(int *adjacencyList, int *edgesOffset, int *edgesSize, int *distance,
int queueSize, int *currentQueue, int *nextQueueSize, int *nextQueue, int level) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x; // thread id
if (tid < queueSize) { // visit all vertexes in a queue in parallel
int current = currentQueue[tid];
for (int i = edgesOffset[current]; i < edgesOffset[current] + edgesSize[current]; ++i) {
int v = adjacencyList[i];
if (distance[v] == INT_MAX) {
distance[v] = level + 1;
int position = atomicAdd(nextQueueSize, 1);
nextQueue[position] = v;
}
}
}
}
void bfsGPU(int start, Graph &G, vector<int> &distance, vector<bool> &visited) {
const int n_blocks = (G.numVertices + N_THREADS_PER_BLOCK - 1) / N_THREADS_PER_BLOCK;
// Initialization of GPU variables
int *d_adjacencyList;
int *d_edgesOffset;
int *d_edgesSize;
int *d_firstQueue;
int *d_secondQueue;
int *d_nextQueueSize;
int *d_distance; // output
// Initialization of CPU variables
int currentQueueSize = 1;
const int NEXT_QUEUE_SIZE = 0;
int level = 0;
// Allocation on device
const int size = G.numVertices * sizeof(int);
const int adjacencySize = G.adjacencyList.size() * sizeof(int);
cudaMalloc((void **)&d_adjacencyList, adjacencySize);
cudaMalloc((void **)&d_edgesOffset, size);
cudaMalloc((void **)&d_edgesSize, size);
cudaMalloc((void **)&d_firstQueue, size);
cudaMalloc((void **)&d_secondQueue, size);
cudaMalloc((void **)&d_distance, size);
cudaMalloc((void **)&d_nextQueueSize, sizeof(int));
// Copy inputs to device
cudaMemcpy(d_adjacencyList, &G.adjacencyList[0], adjacencySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_edgesOffset, &G.edgesOffset[0], size, cudaMemcpyHostToDevice);
cudaMemcpy(d_edgesSize, &G.edgesSize[0], size, cudaMemcpyHostToDevice);
cudaMemcpy(d_nextQueueSize, &NEXT_QUEUE_SIZE, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_firstQueue, &start, sizeof(int), cudaMemcpyHostToDevice);
// initializeDeviceArray<<<n_blocks, N_THREADS_PER_BLOCK>>> (G.numVertices, d_distance, INT_MAX, start); // FOR SOME REASON USING THIS KERNEL DOESNT WORK
// cudaDeviceSynchronize();
auto startTime = chrono::steady_clock::now();
distance = vector<int> (G.numVertices, INT_MAX);
distance[start] = 0;
cudaMemcpy(d_distance, distance.data(), size, cudaMemcpyHostToDevice);
while (currentQueueSize > 0) {
int *d_currentQueue;
int *d_nextQueue;
if (level % 2 == 0) {
d_currentQueue = d_firstQueue;
d_nextQueue = d_secondQueue;
}
else {
d_currentQueue = d_secondQueue;
d_nextQueue = d_firstQueue;
}
computeNextQueue<<<n_blocks, N_THREADS_PER_BLOCK>>> (d_adjacencyList, d_edgesOffset, d_edgesSize, d_distance,
currentQueueSize, d_currentQueue, d_nextQueueSize, d_nextQueue, level);
cudaDeviceSynchronize();
++level;
cudaMemcpy(&currentQueueSize, d_nextQueueSize, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(d_nextQueueSize, &NEXT_QUEUE_SIZE, sizeof(int), cudaMemcpyHostToDevice);
}
cudaMemcpy(&distance[0], d_distance, size, cudaMemcpyDeviceToHost);
auto endTime = std::chrono::steady_clock::now();
auto duration = chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count();
printf("Elapsed time for naive linear GPU implementation (without copying graph) : %li ms.\n", duration);
// Cleanup
cudaFree(d_adjacencyList);
cudaFree(d_edgesOffset);
cudaFree(d_edgesSize);
cudaFree(d_firstQueue);
cudaFree(d_secondQueue);
cudaFree(d_distance);
cudaFree(d_nextQueueSize);
}
|
749c0aecab1b71f31f0a9a1f2c738cd56804a388.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include "gpu_gauss.h"
static texture<float, 2, hipReadModeElementType> texSRC1;
static texture<float4, 2, hipReadModeElementType> texSRC4;
template<typename T> T texSRC(float x, float y);
template<> inline __device__ float texSRC(float x, float y) { return tex2D(texSRC1, x, y); }
template<> inline __device__ float4 texSRC(float x, float y) { return tex2D(texSRC4, x, y); }
static texture<float, 2, hipReadModeElementType> texSIGMA;
struct texSIGMA_t {
inline __device__ float operator()(int ix, int iy) { return tex2D(texSIGMA, ix, iy); }
};
template<typename T>
__global__ void imp_gauss_filter( gpu_plm2<T> dst, float sigma, float precision ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if(ix >= dst.w || iy >= dst.h)
return;
float twoSigma2 = 2.0f * sigma * sigma;
int halfWidth = int(ceilf( precision * sigma ));
T sum = make_zero<T>();
float norm = 0;
for ( int i = -halfWidth; i <= halfWidth; ++i ) {
for ( int j = -halfWidth; j <= halfWidth; ++j ) {
float d = length(make_float2(i,j));
float kernel = __expf( -d *d / twoSigma2 );
T c = texSRC<T>(ix + i, iy + j);
sum += kernel * c;
norm += kernel;
}
}
sum /= norm;
dst(ix, iy) = sum;
}
gpu_image<float> gpu_gauss_filter( const gpu_image<float>& src, float sigma, float precision ) {
gpu_image<float> dst(src.size());
bind(&texSRC1, src);
hipLaunchKernelGGL(( imp_gauss_filter<float>), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<float4> gpu_gauss_filter( const gpu_image<float4>& src, float sigma, float precision ) {
gpu_image<float4> dst(src.size());
bind(&texSRC4, src);
hipLaunchKernelGGL(( imp_gauss_filter<float4>), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
template<typename T, int dx, int dy>
__global__ void imp_gauss_filter_xy( gpu_plm2<T> dst, float sigma, float precision ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float twoSigma2 = 2.0f * sigma * sigma;
int halfWidth = ceilf( precision * sigma );
T sum = texSRC<T>(ix, iy);
float norm = 1;
for ( int i = 1; i <= halfWidth; ++i ) {
float kernel = __expf( -i *i / twoSigma2 );
sum += kernel * (texSRC<T>(ix + dx * i, iy + dy * i) + texSRC<T>(ix - dx * i, iy - dy * i));
norm += 2 * kernel;
}
sum /= norm;
dst(ix, iy) = sum;
}
gpu_image<float> gpu_gauss_filter_xy( const gpu_image<float>& src, float sigma, float precision ) {
if (sigma <= 0) return src;
gpu_image<float> dst(src.size());
gpu_image<float> tmp(src.size());
bind(&texSRC1, src);
hipLaunchKernelGGL(( imp_gauss_filter_xy<float,1,0>), dim3(tmp.blocks()), dim3(tmp.threads()), 0, 0, tmp, sigma, precision);
GPU_CHECK_ERROR();
bind(&texSRC1, tmp);
hipLaunchKernelGGL(( imp_gauss_filter_xy<float,0,1>), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<float4> gpu_gauss_filter_xy( const gpu_image<float4>& src, float sigma, float precision ) {
if (sigma <= 0) return src;
gpu_image<float4> dst(src.size());
gpu_image<float4> tmp(src.size());
bind(&texSRC4, src);
hipLaunchKernelGGL(( imp_gauss_filter_xy<float4,1,0>), dim3(tmp.blocks()), dim3(tmp.threads()), 0, 0, tmp, sigma, precision);
GPU_CHECK_ERROR();
bind(&texSRC4, tmp);
hipLaunchKernelGGL(( imp_gauss_filter_xy<float4,0,1>), dim3(dst.blocks()), dim3(tmp.threads()), 0, 0, dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
// [0.216, 0.568, 0.216], sigma ~= 0.680
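// The hard-coded weights in the kernel below are the outer product of that 1D
// kernel with itself: 0.216*0.216 = 0.046656, 0.216*0.568 = 0.122688,
// 0.568*0.568 = 0.322624, and the nine weights sum to 1.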
template<typename T>
__global__ void imp_gauss_filter_3x3( gpu_plm2<T> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
T sum =
( 0.046656f * texSRC<T>(ix-1, iy-1) +
0.122688f * texSRC<T>(ix, iy-1) +
0.046656f * texSRC<T>(ix+1, iy-1) +
0.122688f * texSRC<T>(ix-1, iy) +
0.322624f * texSRC<T>(ix, iy) +
0.122688f * texSRC<T>(ix+1, iy) +
0.046656f * texSRC<T>(ix-1, iy+1) +
0.122688f * texSRC<T>(ix, iy+1) +
0.046656f * texSRC<T>(ix+1, iy+1)
);
dst(ix, iy) = sum;
}
gpu_image<float4> gpu_gauss_filter_3x3( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
bind(&texSRC4, src);
hipLaunchKernelGGL(( imp_gauss_filter_3x3<float4>), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, dst);
GPU_CHECK_ERROR();
return dst;
}
// [0.03134, 0.24, 0.45732, 0.24, 0.03134], sigma ~= 0.867
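// Likewise, kernel[5][5] below is the outer product of that 1D kernel with itself,
// e.g. 0.03134*0.03134 = 0.0009821956, 0.03134*0.24 = 0.0075216,
// 0.24*0.45732 = 0.1097568, 0.45732*0.45732 = 0.2091415824; the 25 weights sum to 1.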
template<typename T>
__global__ void imp_gauss_filter_5x5( gpu_plm2<T> dst ) {
const float kernel[5][5] = {
{ 0.0009821956f, 0.0075216f, 0.0143324088f, 0.0075216f, 0.0009821956 },
{ 0.0075216f, 0.0576f, 0.1097568f, 0.0576f, 0.0075216 },
{ 0.0143324088f, 0.1097568f, 0.2091415824f, 0.1097568f, 0.0143324088 },
{ 0.0075216f, 0.0576f, 0.1097568f, 0.0576f, 0.0075216 },
{ 0.0009821956f, 0.0075216f, 0.0143324088f, 0.0075216f, 0.0009821956 }
};
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
T sum = make_zero<T>();
for ( int j = 0; j < 5; ++j ) {
for ( int i = 0; i < 5; ++i ) {
T c = texSRC<T>(ix + i -2, iy + j - 2);
sum += kernel[j][i] * c;
}
}
dst(ix, iy) = sum;
}
gpu_image<float4> gpu_gauss_filter_5x5( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
bind(&texSRC4, src);
hipLaunchKernelGGL(( imp_gauss_filter_5x5<float4>), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, dst);
GPU_CHECK_ERROR();
return dst;
}
| 749c0aecab1b71f31f0a9a1f2c738cd56804a388.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include "gpu_gauss.h"
static texture<float, 2, cudaReadModeElementType> texSRC1;
static texture<float4, 2, cudaReadModeElementType> texSRC4;
template<typename T> T texSRC(float x, float y);
template<> inline __device__ float texSRC(float x, float y) { return tex2D(texSRC1, x, y); }
template<> inline __device__ float4 texSRC(float x, float y) { return tex2D(texSRC4, x, y); }
static texture<float, 2, cudaReadModeElementType> texSIGMA;
struct texSIGMA_t {
inline __device__ float operator()(int ix, int iy) { return tex2D(texSIGMA, ix, iy); }
};
template<typename T>
__global__ void imp_gauss_filter( gpu_plm2<T> dst, float sigma, float precision ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if(ix >= dst.w || iy >= dst.h)
return;
float twoSigma2 = 2.0f * sigma * sigma;
int halfWidth = int(ceilf( precision * sigma ));
T sum = make_zero<T>();
float norm = 0;
for ( int i = -halfWidth; i <= halfWidth; ++i ) {
for ( int j = -halfWidth; j <= halfWidth; ++j ) {
float d = length(make_float2(i,j));
float kernel = __expf( -d *d / twoSigma2 );
T c = texSRC<T>(ix + i, iy + j);
sum += kernel * c;
norm += kernel;
}
}
sum /= norm;
dst(ix, iy) = sum;
}
gpu_image<float> gpu_gauss_filter( const gpu_image<float>& src, float sigma, float precision ) {
gpu_image<float> dst(src.size());
bind(&texSRC1, src);
imp_gauss_filter<float><<<dst.blocks(), dst.threads()>>>(dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<float4> gpu_gauss_filter( const gpu_image<float4>& src, float sigma, float precision ) {
gpu_image<float4> dst(src.size());
bind(&texSRC4, src);
imp_gauss_filter<float4><<<dst.blocks(), dst.threads()>>>(dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
template<typename T, int dx, int dy>
__global__ void imp_gauss_filter_xy( gpu_plm2<T> dst, float sigma, float precision ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float twoSigma2 = 2.0f * sigma * sigma;
int halfWidth = ceilf( precision * sigma );
T sum = texSRC<T>(ix, iy);
float norm = 1;
for ( int i = 1; i <= halfWidth; ++i ) {
float kernel = __expf( -i *i / twoSigma2 );
sum += kernel * (texSRC<T>(ix + dx * i, iy + dy * i) + texSRC<T>(ix - dx * i, iy - dy * i));
norm += 2 * kernel;
}
sum /= norm;
dst(ix, iy) = sum;
}
gpu_image<float> gpu_gauss_filter_xy( const gpu_image<float>& src, float sigma, float precision ) {
if (sigma <= 0) return src;
gpu_image<float> dst(src.size());
gpu_image<float> tmp(src.size());
bind(&texSRC1, src);
imp_gauss_filter_xy<float,1,0><<<tmp.blocks(), tmp.threads()>>>(tmp, sigma, precision);
GPU_CHECK_ERROR();
bind(&texSRC1, tmp);
imp_gauss_filter_xy<float,0,1><<<dst.blocks(), dst.threads()>>>(dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<float4> gpu_gauss_filter_xy( const gpu_image<float4>& src, float sigma, float precision ) {
if (sigma <= 0) return src;
gpu_image<float4> dst(src.size());
gpu_image<float4> tmp(src.size());
bind(&texSRC4, src);
imp_gauss_filter_xy<float4,1,0><<<tmp.blocks(), tmp.threads()>>>(tmp, sigma, precision);
GPU_CHECK_ERROR();
bind(&texSRC4, tmp);
imp_gauss_filter_xy<float4,0,1><<<dst.blocks(), tmp.threads()>>>(dst, sigma, precision);
GPU_CHECK_ERROR();
return dst;
}
// [0.216, 0.568, 0.216], sigma ~= 0.680
template<typename T>
__global__ void imp_gauss_filter_3x3( gpu_plm2<T> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
T sum =
( 0.046656f * texSRC<T>(ix-1, iy-1) +
0.122688f * texSRC<T>(ix, iy-1) +
0.046656f * texSRC<T>(ix+1, iy-1) +
0.122688f * texSRC<T>(ix-1, iy) +
0.322624f * texSRC<T>(ix, iy) +
0.122688f * texSRC<T>(ix+1, iy) +
0.046656f * texSRC<T>(ix-1, iy+1) +
0.122688f * texSRC<T>(ix, iy+1) +
0.046656f * texSRC<T>(ix+1, iy+1)
);
dst(ix, iy) = sum;
}
gpu_image<float4> gpu_gauss_filter_3x3( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
bind(&texSRC4, src);
imp_gauss_filter_3x3<float4><<<dst.blocks(), dst.threads()>>>(dst);
GPU_CHECK_ERROR();
return dst;
}
// [0.03134, 0.24, 0.45732, 0.24, 0.03134], sigma ~= 0.867
template<typename T>
__global__ void imp_gauss_filter_5x5( gpu_plm2<T> dst ) {
const float kernel[5][5] = {
{ 0.0009821956f, 0.0075216f, 0.0143324088f, 0.0075216f, 0.0009821956 },
{ 0.0075216f, 0.0576f, 0.1097568f, 0.0576f, 0.0075216 },
{ 0.0143324088f, 0.1097568f, 0.2091415824f, 0.1097568f, 0.0143324088 },
{ 0.0075216f, 0.0576f, 0.1097568f, 0.0576f, 0.0075216 },
{ 0.0009821956f, 0.0075216f, 0.0143324088f, 0.0075216f, 0.0009821956 }
};
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
T sum = make_zero<T>();
for ( int j = 0; j < 5; ++j ) {
for ( int i = 0; i < 5; ++i ) {
T c = texSRC<T>(ix + i -2, iy + j - 2);
sum += kernel[j][i] * c;
}
}
dst(ix, iy) = sum;
}
gpu_image<float4> gpu_gauss_filter_5x5( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
bind(&texSRC4, src);
imp_gauss_filter_5x5<float4><<<dst.blocks(), dst.threads()>>>(dst);
GPU_CHECK_ERROR();
return dst;
}
|
77e12edc9786f4c1c01b4c0417afbdd3850ed625.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
__device__
uint8_t rule(uint8_t alive, uint8_t neighbours) {
return (neighbours == 3) || (alive && (neighbours == 2));
}
__global__
void update_tile_inside_gpu(uint8_t* src, uint8_t* dst, int wide_size, int margin_width) {
int neighbour_offsets[8] = {
-wide_size - 1, -wide_size, -wide_size + 1,
-1, 1,
wide_size - 1, wide_size, wide_size + 1
};
int start = margin_width;
int end = wide_size - margin_width;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
/* printf("i = %d, j = %d\n", i, j); */
if (i >= start && i < end
&& j >= start && j < end) {
int neighbours = 0;
int base = j * wide_size + i;
for (int k = 0; k < 8; k++) {
neighbours += src[base + neighbour_offsets[k]];
}
dst[base] = rule(src[base], neighbours);
}
}
extern "C"
__host__
void update_tile_kernel_call(uint8_t **cells,
int wide_size,
int current_step,
int margin_iterations) {
dim3 numBlocks(wide_size / 32 + 1, wide_size / 32 + 1);
dim3 threadsPerBlocks(32, 32);
for (int growing_margin = 1;
growing_margin <= margin_iterations;
growing_margin++) {
int src_index = (current_step + growing_margin - 1) % 2;
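// cells[src_index] is read and cells[!src_index] is written, so successive
// margin iterations ping-pong between the two buffers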
hipLaunchKernelGGL(( update_tile_inside_gpu), dim3(numBlocks), dim3(threadsPerBlocks) , 0, 0,
cells[src_index],
cells[!src_index],
wide_size,
growing_margin);
}
}
| 77e12edc9786f4c1c01b4c0417afbdd3850ed625.cu | #include <stdint.h>
#include <stdio.h>
__device__
uint8_t rule(uint8_t alive, uint8_t neighbours) {
return (neighbours == 3) || (alive && (neighbours == 2));
}
__global__
void update_tile_inside_gpu(uint8_t* src, uint8_t* dst, int wide_size, int margin_width) {
int neighbour_offsets[8] = {
-wide_size - 1, -wide_size, -wide_size + 1,
-1, 1,
wide_size - 1, wide_size, wide_size + 1
};
int start = margin_width;
int end = wide_size - margin_width;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
/* printf("i = %d, j = %d\n", i, j); */
if (i >= start && i < end
&& j >= start && j < end) {
int neighbours = 0;
int base = j * wide_size + i;
for (int k = 0; k < 8; k++) {
neighbours += src[base + neighbour_offsets[k]];
}
dst[base] = rule(src[base], neighbours);
}
}
extern "C"
__host__
void update_tile_kernel_call(uint8_t **cells,
int wide_size,
int current_step,
int margin_iterations) {
dim3 numBlocks(wide_size / 32 + 1, wide_size / 32 + 1);
dim3 threadsPerBlocks(32, 32);
for (int growing_margin = 1;
growing_margin <= margin_iterations;
growing_margin++) {
int src_index = (current_step + growing_margin - 1) % 2;
update_tile_inside_gpu<<< numBlocks, threadsPerBlocks >>>
(cells[src_index],
cells[!src_index],
wide_size,
growing_margin);
}
}
|
473b9a5ad0d8739092e1790c42dbe389681b4898.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void _kgauss32(int mx, int ns, float *xval, int *xrow, int *xcol, float *sval, int *srow, int *scol, float g, float *k) {
// assume x(mx,nd) and s(nd,ns) are in 1-based csc format
// assume k(mx,ns) has been allocated and zeroed out
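// illustrative 1-based CSC example (made-up values): the 2x2 matrix [[5,0],[0,7]]
// is stored as val = {5,7}, row = {1,2}, col = {1,2,3}; column j (0-based) owns
// val[col[j]-1 .. col[j+1]-2], matching the scol[sc]-1 / xcol[xc]-1 arithmetic below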
int s0, s1, sp, sc, sr, x0, x1, xp, xc, xr, k0, k1, kp;
float sv, xv, xs;
sc = threadIdx.x + blockIdx.x * blockDim.x;
k0 = mx*sc; // k[k0]: first element of k[:,sc]
k1 = k0+mx; // k[k1-1]: last element of k[:,sc]
while (sc < ns) { // sc: 0-based column for s
s0 = scol[sc]-1; // first element of s[:,sc] is at sval[s0] (scol entries are 1-based)
s1 = scol[sc+1]-1; // last element of s[:,sc] is at sval[s1-1]
for (sp = s0; sp < s1; sp++) {
sr = srow[sp]-1; // sr: 0-based row for s (srow entries are 1-based)
sv = sval[sp]; // sv: s[sr,sc] (0-based)
xc = sr; // xc: 0-based column for x (=sr)
x0 = xcol[xc]-1; // first element of x[:,xc] is at xval[x0]
x1 = xcol[xc+1]-1; // last element of x[:,xc] is at xval[x1-1]
for (xp = x0; xp < x1; xp++) {
xr = xrow[xp]-1; // xr: 0-based row for x
xv = xval[xp]; // xv: x[xr,xc=sr], now we can set k[xr,sc]
xs = xv - sv;
k[k0+xr] += xs*xs; // k += (xi-si)^2
}
}
for (kp = k0; kp < k1; kp++) {
k[kp] = exp(-g*k[kp]); // k = exp(-g*sum((xi-si)^2))
}
sc += blockDim.x * gridDim.x;
}
} | 473b9a5ad0d8739092e1790c42dbe389681b4898.cu | #include "includes.h"
__global__ void _kgauss32(int mx, int ns, float *xval, int *xrow, int *xcol, float *sval, int *srow, int *scol, float g, float *k) {
// assume x(mx,nd) and s(nd,ns) are in 1-based csc format
// assume k(mx,ns) has been allocated and zeroed out
int s0, s1, sp, sc, sr, x0, x1, xp, xc, xr, k0, k1, kp;
float sv, xv, xs;
sc = threadIdx.x + blockIdx.x * blockDim.x;
k0 = mx*sc; // k[k0]: first element of k[:,sc]
k1 = k0+mx; // k[k1-1]: last element of k[:,sc]
while (sc < ns) { // sc: 0-based column for s
s0 = scol[sc]-1; // first element of s[:,sc] is at sval[s0] (scol entries are 1-based)
s1 = scol[sc+1]-1; // last element of s[:,sc] is at sval[s1-1]
for (sp = s0; sp < s1; sp++) {
sr = srow[sp]-1; // sr: 0-based row for s (srow entries are 1-based)
sv = sval[sp]; // sv: s[sr,sc] (0-based)
xc = sr; // xc: 0-based column for x (=sr)
x0 = xcol[xc]-1; // first element of x[:,xc] is at xval[x0]
x1 = xcol[xc+1]-1; // last element of x[:,xc] is at xval[x1-1]
for (xp = x0; xp < x1; xp++) {
xr = xrow[xp]-1; // xr: 0-based row for x
xv = xval[xp]; // xv: x[xr,xc=sr], now we can set k[xr,sc]
xs = xv - sv;
k[k0+xr] += xs*xs; // k += (xi-si)^2
}
}
for (kp = k0; kp < k1; kp++) {
k[kp] = exp(-g*k[kp]); // k = exp(-g*sum((xi-si)^2))
}
sc += blockDim.x * gridDim.x;
}
} |
a48aadbe3df8762dca46147e1702ab9d31a01df4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2022 by XGBoost Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <dmlc/registry.h>
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <cmath>
#include "../common/common.h"
#include "../common/math.h"
#include "../common/pseudo_huber.h"
#include "../common/threading_utils.h"
#include "metric_common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
namespace {
/**
* \brief Reduce function for element wise metrics.
*
* The loss function should handle all the computation for each sample, including
* applying the weights. A tuple of {error_i, weight_i} is expected as return.
*/
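// For example, EvalEWiseBase below passes a callback that returns
// {EvalRow(label, pred) * weight, weight}, and Reduce accumulates both
// components into a PackedReduceResult.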
template <typename Fn>
PackedReduceResult Reduce(GenericParameter const* ctx, MetaInfo const& info, Fn&& loss) {
PackedReduceResult result;
auto labels = info.labels.View(ctx->gpu_id);
if (ctx->IsCPU()) {
auto n_threads = ctx->Threads();
std::vector<double> score_tloc(n_threads, 0.0);
std::vector<double> weight_tloc(n_threads, 0.0);
// We sum the losses over all samples and targets instead of doing this per
// target, since the first approach is more accurate while the second is used
// as an approximation in the distributed setting. For rmse:
// - sqrt(1/w(sum_t0 + sum_t1 + ... + sum_tm)) // multi-target
// - sqrt(avg_t0) + sqrt(avg_t1) + ... sqrt(avg_tm) // distributed
common::ParallelFor(info.labels.Size(), ctx->Threads(), [&](size_t i) {
auto t_idx = omp_get_thread_num();
size_t sample_id;
size_t target_id;
std::tie(sample_id, target_id) = linalg::UnravelIndex(i, labels.Shape());
float v, wt;
std::tie(v, wt) = loss(i, sample_id, target_id);
score_tloc[t_idx] += v;
weight_tloc[t_idx] += wt;
});
double residue_sum = std::accumulate(score_tloc.cbegin(), score_tloc.cend(), 0.0);
double weights_sum = std::accumulate(weight_tloc.cbegin(), weight_tloc.cend(), 0.0);
result = PackedReduceResult{residue_sum, weights_sum};
} else {
#if defined(XGBOOST_USE_CUDA)
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + labels.Size();
result = thrust::transform_reduce(
thrust::hip::par(alloc), begin, end,
[=] XGBOOST_DEVICE(size_t i) {
auto idx = linalg::UnravelIndex(i, labels.Shape());
auto sample_id = std::get<0>(idx);
auto target_id = std::get<1>(idx);
auto res = loss(i, sample_id, target_id);
float v{std::get<0>(res)}, wt{std::get<1>(res)};
return PackedReduceResult{v, wt};
},
PackedReduceResult{}, thrust::plus<PackedReduceResult>());
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
return result;
}
} // anonymous namespace
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
namespace {
XGBOOST_DEVICE inline float LogLoss(float y, float py) {
auto xlogy = [](float x, float y) {
float eps = 1e-16;
return (x - 0.0f == 0.0f) ? 0.0f : (x * ::log(::max(y, eps)));
};
const bst_float pneg = 1.0f - py;
return xlogy(-y, py) + xlogy(-(1.0f - y), pneg);
}
} // anonymous namespace
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { return LogLoss(y, py); }
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
class PseudoErrorLoss : public Metric {
PesudoHuberParam param_;
public:
const char* Name() const override { return "mphe"; }
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
void LoadConfig(Json const& in) override { FromJson(in["pseudo_huber_param"], &param_); }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(this->Name());
out["pseudo_huber_param"] = ToJson(param_);
}
double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
CHECK_EQ(info.labels.Shape(0), info.num_row_);
auto labels = info.labels.View(tparam_->gpu_id);
preds.SetDevice(tparam_->gpu_id);
auto predts = tparam_->IsCPU() ? preds.ConstHostSpan() : preds.ConstDeviceSpan();
info.weights_.SetDevice(tparam_->gpu_id);
common::OptionalWeights weights(tparam_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan());
float slope = this->param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
PackedReduceResult result =
Reduce(tparam_, info, [=] XGBOOST_DEVICE(size_t i, size_t sample_id, size_t target_id) {
float wt = weights[sample_id];
auto a = labels(sample_id, target_id) - predts[i];
auto v = common::Sqr(slope) * (std::sqrt((1 + common::Sqr(a / slope))) - 1) * wt;
return std::make_tuple(v, wt);
});
double dat[2]{result.Residue(), result.Weights()};
if (rabit::IsDistributed()) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return EvalRowMAPE::GetFinal(dat[0], dat[1]);
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
// assume label is in [0,1]
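// e.g. with the default threshold of 0.5, pred = 0.7 counts as class 1,
// so the row error is 1 - label (0 when label == 1, 1 when label == 0)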
return pred > threshold_ ? 1.0f - label : label;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - ::log(py) * y;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
/**
* Gamma deviance
*
* Expected input:
* label >= 0
* predt >= 0
*/
struct EvalGammaDeviance {
const char *Name() const { return "gamma-deviance"; }
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float predt) const {
predt += kRtEps;
label += kRtEps;
return ::log(predt / label) + label / predt - 1;
}
static double GetFinal(double esum, double wsum) {
if (wsum <= 0) {
wsum = kRtEps;
}
return 2 * esum / wsum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
py = ::max(py, 1e-6f);
// hardcoded dispersion.
float constexpr kPsi = 1.0;
bst_float theta = -1. / py;
bst_float a = kPsi;
float b = -::log(-theta);
// c = 1. / kPsi^2 * ::log(y/kPsi) - ::log(y) - common::LogGamma(1. / kPsi);
// = 1.0f * ::log(y) - ::log(y) - 0 = 0
float c = 0;
// general form for exponential family.
return -((y * theta - b) / a + c);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_);
bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_);
return -a + b;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
* \tparam Derived the name of subclass
*/
template <typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param} {}
double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info) override {
CHECK_EQ(preds.Size(), info.labels.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
if (info.labels.Size() != 0) {
CHECK_NE(info.labels.Shape(1), 0);
}
auto labels = info.labels.View(tparam_->gpu_id);
info.weights_.SetDevice(tparam_->gpu_id);
common::OptionalWeights weights(tparam_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan());
preds.SetDevice(tparam_->gpu_id);
auto predts = tparam_->IsCPU() ? preds.ConstHostSpan() : preds.ConstDeviceSpan();
auto d_policy = policy_;
auto result =
Reduce(tparam_, info, [=] XGBOOST_DEVICE(size_t i, size_t sample_id, size_t target_id) {
float wt = weights[sample_id];
float residue = d_policy.EvalRow(labels(sample_id, target_id), predts[i]);
residue *= wt;
return std::make_tuple(residue, wt);
});
double dat[2]{result.Residue(), result.Weights()};
rabit::Allreduce<rabit::op::Sum>(dat, 2);
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override { return policy_.Name(); }
private:
Policy policy_;
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae").describe("Mean absolute error.").set_body([](const char*) {
return new EvalEWiseBase<EvalRowMAE>();
});
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PseudoErrorLoss, "mphe")
.describe("Mean Pseudo-huber error.")
.set_body([](const char*) { return new PseudoErrorLoss{}; });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
| a48aadbe3df8762dca46147e1702ab9d31a01df4.cu | /*!
* Copyright 2015-2022 by XGBoost Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <dmlc/registry.h>
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <cmath>
#include "../common/common.h"
#include "../common/math.h"
#include "../common/pseudo_huber.h"
#include "../common/threading_utils.h"
#include "metric_common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
namespace {
/**
* \brief Reduce function for element wise metrics.
*
* The loss function should handle all the computation for each sample, including
* applying the weights. A tuple of {error_i, weight_i} is expected as return.
*/
template <typename Fn>
PackedReduceResult Reduce(GenericParameter const* ctx, MetaInfo const& info, Fn&& loss) {
PackedReduceResult result;
auto labels = info.labels.View(ctx->gpu_id);
if (ctx->IsCPU()) {
auto n_threads = ctx->Threads();
std::vector<double> score_tloc(n_threads, 0.0);
std::vector<double> weight_tloc(n_threads, 0.0);
// We sum the losses over all samples and targets instead of doing this per
// target, since the first approach is more accurate while the second is used
// as an approximation in the distributed setting. For rmse:
// - sqrt(1/w(sum_t0 + sum_t1 + ... + sum_tm)) // multi-target
// - sqrt(avg_t0) + sqrt(avg_t1) + ... sqrt(avg_tm) // distributed
common::ParallelFor(info.labels.Size(), ctx->Threads(), [&](size_t i) {
auto t_idx = omp_get_thread_num();
size_t sample_id;
size_t target_id;
std::tie(sample_id, target_id) = linalg::UnravelIndex(i, labels.Shape());
float v, wt;
std::tie(v, wt) = loss(i, sample_id, target_id);
score_tloc[t_idx] += v;
weight_tloc[t_idx] += wt;
});
double residue_sum = std::accumulate(score_tloc.cbegin(), score_tloc.cend(), 0.0);
double weights_sum = std::accumulate(weight_tloc.cbegin(), weight_tloc.cend(), 0.0);
result = PackedReduceResult{residue_sum, weights_sum};
} else {
#if defined(XGBOOST_USE_CUDA)
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + labels.Size();
result = thrust::transform_reduce(
thrust::cuda::par(alloc), begin, end,
[=] XGBOOST_DEVICE(size_t i) {
auto idx = linalg::UnravelIndex(i, labels.Shape());
auto sample_id = std::get<0>(idx);
auto target_id = std::get<1>(idx);
auto res = loss(i, sample_id, target_id);
float v{std::get<0>(res)}, wt{std::get<1>(res)};
return PackedReduceResult{v, wt};
},
PackedReduceResult{}, thrust::plus<PackedReduceResult>());
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
return result;
}
} // anonymous namespace
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
namespace {
XGBOOST_DEVICE inline float LogLoss(float y, float py) {
auto xlogy = [](float x, float y) {
float eps = 1e-16;
return (x - 0.0f == 0.0f) ? 0.0f : (x * std::log(std::max(y, eps)));
};
const bst_float pneg = 1.0f - py;
return xlogy(-y, py) + xlogy(-(1.0f - y), pneg);
}
} // anonymous namespace
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { return LogLoss(y, py); }
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
class PseudoErrorLoss : public Metric {
PesudoHuberParam param_;
public:
const char* Name() const override { return "mphe"; }
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
void LoadConfig(Json const& in) override { FromJson(in["pseudo_huber_param"], &param_); }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(this->Name());
out["pseudo_huber_param"] = ToJson(param_);
}
double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
CHECK_EQ(info.labels.Shape(0), info.num_row_);
auto labels = info.labels.View(tparam_->gpu_id);
preds.SetDevice(tparam_->gpu_id);
auto predts = tparam_->IsCPU() ? preds.ConstHostSpan() : preds.ConstDeviceSpan();
info.weights_.SetDevice(tparam_->gpu_id);
common::OptionalWeights weights(tparam_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan());
float slope = this->param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
PackedReduceResult result =
Reduce(tparam_, info, [=] XGBOOST_DEVICE(size_t i, size_t sample_id, size_t target_id) {
float wt = weights[sample_id];
auto a = labels(sample_id, target_id) - predts[i];
auto v = common::Sqr(slope) * (std::sqrt((1 + common::Sqr(a / slope))) - 1) * wt;
return std::make_tuple(v, wt);
});
double dat[2]{result.Residue(), result.Weights()};
if (rabit::IsDistributed()) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return EvalRowMAPE::GetFinal(dat[0], dat[1]);
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - std::log(py) * y;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
/**
* Gamma deviance
*
* Expected input:
* label >= 0
* predt >= 0
*/
struct EvalGammaDeviance {
const char *Name() const { return "gamma-deviance"; }
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float predt) const {
predt += kRtEps;
label += kRtEps;
return std::log(predt / label) + label / predt - 1;
}
static double GetFinal(double esum, double wsum) {
if (wsum <= 0) {
wsum = kRtEps;
}
return 2 * esum / wsum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
py = std::max(py, 1e-6f);
// hardcoded dispersion.
float constexpr kPsi = 1.0;
bst_float theta = -1. / py;
bst_float a = kPsi;
float b = -std::log(-theta);
// c = 1. / kPsi^2 * std::log(y/kPsi) - std::log(y) - common::LogGamma(1. / kPsi);
// = 1.0f * std::log(y) - std::log(y) - 0 = 0
float c = 0;
// general form for exponential family.
return -((y * theta - b) / a + c);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_);
bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_);
return -a + b;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
* \tparam Derived the name of subclass
*/
template <typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param} {}
double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info) override {
CHECK_EQ(preds.Size(), info.labels.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
if (info.labels.Size() != 0) {
CHECK_NE(info.labels.Shape(1), 0);
}
auto labels = info.labels.View(tparam_->gpu_id);
info.weights_.SetDevice(tparam_->gpu_id);
common::OptionalWeights weights(tparam_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan());
preds.SetDevice(tparam_->gpu_id);
auto predts = tparam_->IsCPU() ? preds.ConstHostSpan() : preds.ConstDeviceSpan();
auto d_policy = policy_;
auto result =
Reduce(tparam_, info, [=] XGBOOST_DEVICE(size_t i, size_t sample_id, size_t target_id) {
float wt = weights[sample_id];
float residue = d_policy.EvalRow(labels(sample_id, target_id), predts[i]);
residue *= wt;
return std::make_tuple(residue, wt);
});
double dat[2]{result.Residue(), result.Weights()};
rabit::Allreduce<rabit::op::Sum>(dat, 2);
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override { return policy_.Name(); }
private:
Policy policy_;
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae").describe("Mean absolute error.").set_body([](const char*) {
return new EvalEWiseBase<EvalRowMAE>();
});
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PseudoErrorLoss, "mphe")
.describe("Mean Pseudo-huber error.")
.set_body([](const char*) { return new PseudoErrorLoss{}; });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char*) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
|
980fdcd84c81b245ee7bded6e1411c06c3183ee1.hip | // !!! This is a file automatically generated by hipify!!!
#include <numeric>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "util.hpp"
__global__
void histogram(int* x, int* bins, int n) {
auto i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
const auto c = x[i];
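// note: this unsynchronized increment races when several threads hit the same
// bin, so the printed sum can fall short of n; atomicAdd(&bins[c], 1) would
// make the count exact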
bins[c]++;
}
}
int main(void) {
const int n = 1024;
const int c = 16;
int* x = malloc_managed<int>(n);
for (auto i=0; i<n; ++i) x[i] = rand()%c;
int* bins = malloc_managed<int>(c);
std::fill(bins, bins+c, 0);
hipLaunchKernelGGL(( histogram), dim3(1), dim3(n), 0, 0, x, bins, n);
hipDeviceSynchronize();
printf("bins: ");
for (auto i=0; i<c; ++i) printf("%d ", bins[i]); printf("\n");
auto sum = std::accumulate(bins, bins+c, 0);
printf("sum %d, expected %d\n", sum, n);
hipFree(x);
return 0;
}
| 980fdcd84c81b245ee7bded6e1411c06c3183ee1.cu | #include <numeric>
#include <cstdio>
#include <cuda.h>
#include "util.hpp"
__global__
void histogram(int* x, int* bins, int n) {
auto i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
const auto c = x[i];
bins[c]++;
}
}
int main(void) {
const int n = 1024;
const int c = 16;
int* x = malloc_managed<int>(n);
for (auto i=0; i<n; ++i) x[i] = rand()%c;
int* bins = malloc_managed<int>(c);
std::fill(bins, bins+c, 0);
histogram<<<1, n>>>(x, bins, n);
cudaDeviceSynchronize();
printf("bins: ");
for (auto i=0; i<c; ++i) printf("%d ", bins[i]); printf("\n");
auto sum = std::accumulate(bins, bins+c, 0);
printf("sum %d, expected %d\n", sum, n);
cudaFree(x);
return 0;
}
|
b1a2ded3963486b5e6180a05e86c38de70af64b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdint>
// The sets of fg/bg pixels are not sorted in the Python version, so individual
// entries can differ slightly here; the total sum over the array should be identical.
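// outCons layout as used below: the trailing x extent is interleaved, with
// [..][x1*2] accumulating the signed vote weight and [..][x1*2+1] the vote count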
__device__ void _fillConsensusArray(
unsigned idx, unsigned idy, unsigned idz,
const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE],
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE],
float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE*2])
{
unsigned int mid = int((PSX*PSY*PSZ)/2);
unsigned const PSXH = int(PSX/2);
unsigned const PSYH = int(PSY/2);
unsigned const PSZH = int(PSZ/2);
// ignore border pixels
if ((idx < (DATAXSIZE-PSXH)) &&
(idy < (DATAYSIZE-PSYH)) &&
(idz < (DATAZSIZE-PSZH)) &&
(idx >= (PSXH)) &&
(idy >= (PSYH)) &&
(idz >= (PSZH))){
// only if pixel in foreground
if(inPred[mid][idz][idy][idx] <= TH)
return;
// for all pairs of pixels in patch
for(int pz1 = 0; pz1 < PSZ; pz1++) {
for(int py1 = 0; py1 < PSY; py1++) {
for(int px1 = 0; px1 < PSX; px1++) {
// offset in patch pixel 1
int po1 = px1 + PSX * py1 + PSX * PSY * pz1;
// first element of pair should have high affinity
// (to not count every pair twice)
float v1 = inPred[po1][idz][idy][idx];
if(v1 <= TH) {
continue;
}
// check if predicted affinity in patch agrees
// with corresponding pixel in fg prediction
const int z1 = idz+pz1-PSZH;
const int y1 = idy+py1-PSYH;
const int x1 = idx+px1-PSXH;
if(inPred[mid][z1][y1][x1] <= TH) {
continue;
}
if(inOverlap[z1][y1][x1] != 0){
continue;
}
// second element of pixel pair
for(int pz2 = 0; pz2 < PSZ; pz2++) {
for(int py2 = 0; py2 < PSY; py2++) {
for(int px2 = 0; px2 < PSX; px2++) {
// offset in patch pixel 2
int po2 = px2 + PSX * py2 + PSX * PSY * pz2;
if (po1 == po2)
continue;
const int z2 = idz+pz2-PSZH;
const int y2 = idy+py2-PSYH;
const int x2 = idx+px2-PSXH;
// patch pixel should correspond to foreground
if(inPred[mid][z2][y2][x2] <= TH) {
continue;
}
if(inOverlap[z2][y2][x2] != 0){
continue;
}
float v2 = inPred[po2][idz][idy][idx];
// offset from pixel 1 to pixel 2
int zo = pz2-pz1+PSZ-1;
int yo = py2-py1+PSY-1;
int xo = px2-px1+PSX-1;
// if both high affinity, increase consensus
// pixel 1 with offset yo/xo to pixel 2
if(v2 > TH) {
if(po2 <= po1)
continue;
// atomicAdd(
// &outCons[zo][yo][xo][z1][y1][x1],
// 1);
float v3 = (v1*v2 - TH*TH)/(1.0-TH*TH);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2],
v3);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2+1],
1);
}
// if one foreground/one background,
// decrease consensus
else if(v2 < THI) {
// reverse order if pixel 2 before pixel1
if(po2 <= po1) {
zo = pz1-pz2;
zo += PSZ-1;
yo = py1-py2;
yo += PSY-1;
xo = px1-px2;
xo += PSX-1;
// atomicAdd(
// &outCons[zo][yo][xo][z2][y2][x2],
// -1);
float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH);
// v3 = v3*4/3;
atomicAdd(
&outCons[zo][yo][xo][z2][y2][x2*2],
-v3);
atomicAdd(
&outCons[zo][yo][xo][z2][y2][x2*2+1],
1);
}
else {
// atomicAdd(
// &outCons[zo][yo][xo][z1][y1][x1],
// -1);
// v3 = v3*4/3;
float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2],
-v3);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2+1],
1);
}
}
}
}
}
}
}
}
}
}
// device function to set the 3D volume
__global__ void fillConsensusArray_allPatches(
const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE],
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE],
float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE*2])
{
// pixel for this thread: idz, idy, idx
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned idz = blockIdx.z*blockDim.z + threadIdx.z;
//unsigned idz = 0;
_fillConsensusArray(idx, idy, idz, inPred, inOverlap, outCons);
// _fillConsensusArray(idx, idy, idz, inPred, outCons);
}
// kernel to fill the consensus array for an explicit subset of patch positions
__global__ void fillConsensusArray_subsetPatches(
const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE],
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE],
float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE*2],
const unsigned patchesIDs[], const uint64_t numPatches)
{
unsigned id = blockIdx.x*blockDim.x + threadIdx.x;
if(id >= numPatches)
return;
int idz = patchesIDs[id*3+0];
int idy = patchesIDs[id*3+1];
int idx = patchesIDs[id*3+2];
_fillConsensusArray(idx, idy, idz, inPred, inOverlap, outCons);
// _fillConsensusArray(idx, idy, idz, inPred, outCons);
}
#ifdef MAIN_FILLCONSENSUS
#include "verySimpleArgParse.h"
#include "cuda_vote_instances.h"
int main(int argc, char *argv[])
{
std::string affinitiesFileName = getAndCheckArg(argc, argv,
"--affinities");
std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");
predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName);
consensus_t *outConsensusGPU = allocInitConsensus();
computeConsensus(consensusFileName, inPredAffinitiesGPU, outConsensusGPU);
return 0;
}
#endif
| b1a2ded3963486b5e6180a05e86c38de70af64b4.cu | #include <cstdint>
// sets of fg/bg pixels in python not sorted, so slightly different result
// here, total sum over array should be identical
__device__ void _fillConsensusArray(
unsigned idx, unsigned idy, unsigned idz,
const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE],
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE],
float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE*2])
{
unsigned int mid = int((PSX*PSY*PSZ)/2);
unsigned const PSXH = int(PSX/2);
unsigned const PSYH = int(PSY/2);
unsigned const PSZH = int(PSZ/2);
// ignore border pixels
if ((idx < (DATAXSIZE-PSXH)) &&
(idy < (DATAYSIZE-PSYH)) &&
(idz < (DATAZSIZE-PSZH)) &&
(idx >= (PSXH)) &&
(idy >= (PSYH)) &&
(idz >= (PSZH))){
// only if pixel in foreground
if(inPred[mid][idz][idy][idx] <= TH)
return;
// for all pairs of pixels in patch
for(int pz1 = 0; pz1 < PSZ; pz1++) {
for(int py1 = 0; py1 < PSY; py1++) {
for(int px1 = 0; px1 < PSX; px1++) {
// offset in patch pixel 1
int po1 = px1 + PSX * py1 + PSX * PSY * pz1;
// first element of pair should have high affinity
// (to not count every pair twice)
float v1 = inPred[po1][idz][idy][idx];
if(v1 <= TH) {
continue;
}
// check if predicted affinity in patch agrees
// with corresponding pixel in fg prediction
const int z1 = idz+pz1-PSZH;
const int y1 = idy+py1-PSYH;
const int x1 = idx+px1-PSXH;
if(inPred[mid][z1][y1][x1] <= TH) {
continue;
}
if(inOverlap[z1][y1][x1] != 0){
continue;
}
// second element of pixel pair
for(int pz2 = 0; pz2 < PSZ; pz2++) {
for(int py2 = 0; py2 < PSY; py2++) {
for(int px2 = 0; px2 < PSX; px2++) {
// offset in patch pixel 2
int po2 = px2 + PSX * py2 + PSX * PSY * pz2;
if (po1 == po2)
continue;
const int z2 = idz+pz2-PSZH;
const int y2 = idy+py2-PSYH;
const int x2 = idx+px2-PSXH;
// patch pixel should correspond to foreground
if(inPred[mid][z2][y2][x2] <= TH) {
continue;
}
if(inOverlap[z2][y2][x2] != 0){
continue;
}
float v2 = inPred[po2][idz][idy][idx];
// offset from pixel 1 to pixel 2
int zo = pz2-pz1+PSZ-1;
int yo = py2-py1+PSY-1;
int xo = px2-px1+PSX-1;
// if both high affinity, increase consensus
// pixel 1 with offset yo/xo to pixel 2
if(v2 > TH) {
if(po2 <= po1)
continue;
// atomicAdd(
// &outCons[zo][yo][xo][z1][y1][x1],
// 1);
float v3 = (v1*v2 - TH*TH)/(1.0-TH*TH);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2],
v3);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2+1],
1);
}
// if one foreground/one background,
// decrease consensus
else if(v2 < THI) {
// reverse order if pixel 2 before pixel1
if(po2 <= po1) {
zo = pz1-pz2;
zo += PSZ-1;
yo = py1-py2;
yo += PSY-1;
xo = px1-px2;
xo += PSX-1;
// atomicAdd(
// &outCons[zo][yo][xo][z2][y2][x2],
// -1);
float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH);
// v3 = v3*4/3;
atomicAdd(
&outCons[zo][yo][xo][z2][y2][x2*2],
-v3);
atomicAdd(
&outCons[zo][yo][xo][z2][y2][x2*2+1],
1);
}
else {
// atomicAdd(
// &outCons[zo][yo][xo][z1][y1][x1],
// -1);
// v3 = v3*4/3;
float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2],
-v3);
atomicAdd(
&outCons[zo][yo][xo][z1][y1][x1*2+1],
1);
}
}
}
}
}
}
}
}
}
}
// kernel to fill the consensus array, launched over every patch position in the volume
__global__ void fillConsensusArray_allPatches(
const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE],
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE],
float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE*2])
{
// pixel for this thread: idz, idy, idx
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned idz = blockIdx.z*blockDim.z + threadIdx.z;
//unsigned idz = 0;
_fillConsensusArray(idx, idy, idz, inPred, inOverlap, outCons);
// _fillConsensusArray(idx, idy, idz, inPred, outCons);
}
// kernel to fill the consensus array for an explicit subset of patch positions
__global__ void fillConsensusArray_subsetPatches(
const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE],
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE],
float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE*2],
const unsigned patchesIDs[], const uint64_t numPatches)
{
unsigned id = blockIdx.x*blockDim.x + threadIdx.x;
if(id >= numPatches)
return;
int idz = patchesIDs[id*3+0];
int idy = patchesIDs[id*3+1];
int idx = patchesIDs[id*3+2];
_fillConsensusArray(idx, idy, idz, inPred, inOverlap, outCons);
// _fillConsensusArray(idx, idy, idz, inPred, outCons);
}
#ifdef MAIN_FILLCONSENSUS
#include "verySimpleArgParse.h"
#include "cuda_vote_instances.h"
int main(int argc, char *argv[])
{
std::string affinitiesFileName = getAndCheckArg(argc, argv,
"--affinities");
std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");
predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName);
consensus_t *outConsensusGPU = allocInitConsensus();
computeConsensus(consensusFileName, inPredAffinitiesGPU, outConsensusGPU);
return 0;
}
#endif
|
da6458c9d2cccd2213e7e0bb5b0ed0be06eabe53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Name: Nate Steawrt
* Date: 04-04-16
* Description: CUDA implementation of Matrix morphism
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define RANDOM_VALUE_MIN 1.0
#define RANDOM_VALUE_MAX 2.0
#define NUM_ROWS 4097
#define NUM_COLS 4097
/*
* Calculate and return a random value between min and max.
*/
double randDouble(double min, double max) {
double range = max - min;
double dist = RAND_MAX / range;
return min + (rand() / dist);
}
/*
* Output the matrix to fout
*/
void outputMatrix(FILE *fout, double *matrix, int rows, int cols) {
int i, j;
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
fprintf(fout, "%lf ", *(matrix + i * cols + j));
}
fprintf(fout, "\n");
}
}
__global__ void computeMath(double *matrix) {
int i;
// Grab id of thread
int threadId = blockDim.x * threadIdx.y + threadIdx.x + 1;
// Declare pointers to the two arguments of the addition and the result pointer
double *f_ptr, *first_ptr, *second_ptr;
// Grab starting points for pointers
f_ptr = matrix + threadId * NUM_COLS;
first_ptr = matrix + (threadId - 1) * NUM_COLS + 1;
second_ptr = f_ptr + 1;
// Compute a single row
for (i = 0; i < NUM_COLS - 1; i++, f_ptr++, first_ptr++, second_ptr++) {
*f_ptr = *first_ptr + *second_ptr;
}
}
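/*
 * Reading of the kernel above: thread t (1-based because of the "+ 1") rewrites row t
 * of the matrix in place, setting matrix[t][c] = matrix[t-1][c+1] + matrix[t][c+1]
 * for c = 0 .. NUM_COLS-2. Note that blockIdx is not used when forming threadId, so
 * with several blocks every block recomputes the same blockDim.x * blockDim.y rows,
 * and only rows 1 .. blockDim.x * blockDim.y of the 4097-row matrix are ever updated.
 */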
/*
* Check if an error occurred during the last CUDA command
*/
void checkError() {
int errorCode = hipGetLastError();
if (errorCode != 0) {
printf("Error %d occurred during last operation.\n", errorCode);
}
}
int main(void) {
// Declare the needed variables
int i, j;
// Define thread hierarchy
int nblocks = 64;
int dimX = 64;
int dimY = 1;
// Declare the memory pointers
double *h_matrix, *d_matrix;
// Allocate memory for host and device
size_t memSize = NUM_ROWS * NUM_COLS * sizeof(*h_matrix);
// Create space on the host and device for matrix
h_matrix = (double *)malloc(memSize);
hipMalloc( (void**) &d_matrix, memSize);
checkError();
// Initialize the matrix and copy values into device
double *f_ptr = h_matrix; // Setup a traversal pointer
for (i = 0; i < NUM_ROWS; i++) {
for (j = 0; j < NUM_COLS; j++, f_ptr++) {
*f_ptr = randDouble(RANDOM_VALUE_MIN, RANDOM_VALUE_MAX);
}
}
hipMemcpy(d_matrix, h_matrix, memSize, hipMemcpyHostToDevice);
checkError();
// Set up grid and block structure
dim3 dimGrid(nblocks);
dim3 dimBlock(dimX, dimY);
// Launch the kernel
for (i = 0; i < 100; i++) {
hipLaunchKernelGGL(( computeMath), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_matrix);
checkError();
}
// Retrieve results and free memory
hipMemcpy(h_matrix, d_matrix, memSize, hipMemcpyDeviceToHost);
checkError();
free(h_matrix);
hipFree(d_matrix);
checkError();
}
| da6458c9d2cccd2213e7e0bb5b0ed0be06eabe53.cu | /*
* Name: Nate Steawrt
* Date: 04-04-16
* Description: CUDA implementation of Matrix morphism
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define RANDOM_VALUE_MIN 1.0
#define RANDOM_VALUE_MAX 2.0
#define NUM_ROWS 4097
#define NUM_COLS 4097
/*
* Calculate and return a random value between min and max.
*/
double randDouble(double min, double max) {
double range = max - min;
double dist = RAND_MAX / range;
return min + (rand() / dist);
}
/*
* Output the matrix to fout
*/
void outputMatrix(FILE *fout, double *matrix, int rows, int cols) {
int i, j;
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
fprintf(fout, "%lf ", *(matrix + i * cols + j));
}
fprintf(fout, "\n");
}
}
__global__ void computeMath(double *matrix) {
int i;
// Grab id of thread
int threadId = blockDim.x * threadIdx.y + threadIdx.x + 1;
// Declare pointers to the two arguments of the addition and the result pointer
double *f_ptr, *first_ptr, *second_ptr;
// Grab starting points for pointers
f_ptr = matrix + threadId * NUM_COLS;
first_ptr = matrix + (threadId - 1) * NUM_COLS + 1;
second_ptr = f_ptr + 1;
// Compute a single row
for (i = 0; i < NUM_COLS - 1; i++, f_ptr++, first_ptr++, second_ptr++) {
*f_ptr = *first_ptr + *second_ptr;
}
}
/*
* Check if an error occurred during the last CUDA command
*/
void checkError() {
int errorCode = cudaGetLastError();
if (errorCode != 0) {
printf("Error %d occurred during last operation.\n", errorCode);
}
}
int main(void) {
// Declare the needed variables
int i, j;
// Define thread hierarchy
int nblocks = 64;
int dimX = 64;
int dimY = 1;
// Declare the memory pointers
double *h_matrix, *d_matrix;
// Allocate memory for host and device
size_t memSize = NUM_ROWS * NUM_COLS * sizeof(*h_matrix);
// Create space on the host and device for matrix
h_matrix = (double *)malloc(memSize);
cudaMalloc( (void**) &d_matrix, memSize);
checkError();
// Initialize the matrix and copy values into device
double *f_ptr = h_matrix; // Setup a traversal pointer
for (i = 0; i < NUM_ROWS; i++) {
for (j = 0; j < NUM_COLS; j++, f_ptr++) {
*f_ptr = randDouble(RANDOM_VALUE_MIN, RANDOM_VALUE_MAX);
}
}
cudaMemcpy(d_matrix, h_matrix, memSize, cudaMemcpyHostToDevice);
checkError();
// Set up grid and block structure
dim3 dimGrid(nblocks);
dim3 dimBlock(dimX, dimY);
// Launch the kernel
for (i = 0; i < 100; i++) {
computeMath<<< dimGrid, dimBlock >>>(d_matrix);
checkError();
}
// Retrieve results and free memory
cudaMemcpy(h_matrix, d_matrix, memSize, cudaMemcpyDeviceToHost);
checkError();
free(h_matrix);
cudaFree(d_matrix);
checkError();
}
|
97ca76a5a4c665cddfd2765a4f3fdebff7d8fdb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cuda/tau.cuh"
__device__ bool is_critical_reaction(state state, reactions reactions, int sbi, int ri)
{
bool crit = false;
int sf = 0; // reactants found
// Instead of looping on every specie, we stop early if we already
// found 2 reactants (since we don't support reactions with 3 or
// more).
for (int spi = 0; sf < 2 && spi < SPC; spi++) {
// get reactant coeff. for specie spi
int rt = reactions.r[GET_COEFF(spi, ri)];
// if rt > 0, we found another reactant of reaction ri
sf += (rt > 0);
// if rt > 0 then specie spi is required. If the current
// subvolume has 0 mols of spi, the reaction is critical.
if(rt > 0 && state.curr[GET_SPI(spi, sbi)] == 0){
return true;
}
// delta is the net variation in population spi after NC
// firings of the reaction
int delta = (reactions.p[GET_COEFF(spi, ri)] - reactions.r[GET_COEFF(spi, ri)]) * NC;
if(delta >= 0) {
// the reaction actually *increases* (or leave unchanged)
// the current specie population, so it's obviously not
// critical
continue;
}
// Now delta < 0 and abs(delta) is the decrease in specie spi
// population caused by the reaction.
// If abs(delta) > population, the reaction is critical.
crit = crit || (abs(delta) > state.curr[GET_SPI(spi, sbi)]);
}
return crit;
}
__device__ bool is_critical_diffusion(state state, int sbi, int spi)
{
return state.curr[GET_SPI(spi, sbi)] < NC;
}
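// Criticality summary: a reaction is critical in a subvolume when its reactant
// populations could not sustain NC further firings (or a reactant is already absent),
// and a species is diffusion-critical when fewer than NC molecules of it remain
// locally. Critical events are not leaped over; at most one of them fires per step
// (see LEAP_CR in compute_taus below).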
__device__ float compute_g(state state, reactions reactions, int * hors, int sbi, int spi)
{
int hor = hors[spi];
int x; // to shut up compiler warning
switch (hor) {
case 1:
return 1;
case 2:
return 2;
case 3:
x = state.curr[GET_SPI(spi, sbi)];
return 2.0 + 1.0 / (x - 1);
default:
// HOR(spi) == 0 if spi does not appear as reactant in any
// reaction. Return +Inf so that when we divide by g we get 0
// and the procedure takes max(0, 1) = 1 as g.
return INFINITY;
}
}
__device__ float compute_tau_sp(state state, reactions reactions, int * hors,
bool crit_r[MAXREACTIONS], neigh neigh,
int sbi, int spi, rates rates)
{
float g = compute_g(state, reactions, hors, sbi, spi);
int x = state.curr[GET_SPI(spi, sbi)];
// compute mu and sigma2
// compute mu (as defined in Cao, Gillespie, Petzold - Efficient
// step size selection for the tau-leaping simulation method, J
// chem Phys 124, 044109, page 7, formula 32a), for specie spi in
// subvolume sbi
float mu = 0.0;
// compute sigma2 (as defined in Cao, Gillespie, Petzold -
// Efficient step size selection for the tau-leaping simulation
// method, J chem Phys 124, 044109, page 7, formula 32b), for
// specie spi in subvolume sbi
float sigma2 = 0.0;
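// In symbols, with v_sj the net stoichiometric change of species s in reaction j and
// a_j the reaction propensity, the sums built below are
//   mu_s     = sum_j v_sj   * a_j(x)   (cf. formula 32a cited above)
//   sigma2_s = sum_j v_sj^2 * a_j(x)   (cf. formula 32b cited above)
// restricted to non-critical reactions; the diffusion contributions are folded in
// afterwards.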
// sum propensities for the reactions
for (int ri = 0; ri < RC; ri++) {
// when computing mu we only sum over non-critical reactions
if (crit_r[ri]) {
continue;
}
// mu is the sum of (change_vector) * (reaction_rate) over
// non-critical reactions.
//
// sigma2 is the sum of (change_vector)² * (reaction_rate)
// over non-critical reactions.
int v = reactions.p[GET_COEFF(spi, ri)] - reactions.r[GET_COEFF(spi, ri)];
mu += v * rates.reaction[GET_RR(ri, sbi)];
sigma2 += (v * v) * rates.reaction[GET_RR(ri, sbi)];
}
if(is_critical_diffusion(state, sbi, spi)) {
// if spi is critical in this subvolume, don't sum
// propensities of outgoing diffusions
} else {
// Add propensities of outgoing diffusions for specie spi. We
// should sum the diffusion propensities over all the
// neighbours, but diff_rates_array already has the overall
// diffusion propensity.
mu += rates.diffusion[GET_DR(spi, sbi)];
sigma2 += rates.diffusion[GET_DR(spi, sbi)];
}
// add propensities of incoming diffusions for specie spi
for (int i = 0; i < 6; i++) { // loop over the neighbours
unsigned int ni = neigh.index[sbi * 6 + i]; // neighbour index
if(ni == sbi) {
continue;
}
int nni = neigh.count[ni];
// Subtract from mu the propensity of specie spi in subvolume
// ni divided by nni (i.e. we sum a negative value).
mu -= (rates.diffusion[GET_DR(spi, ni)]) / nni;
// Add to sigma2 the propensity of specie spi in subvolume ni
// divided by nni. No need to square since the coeff. is
// always -1, just sum 1.
sigma2 += (rates.diffusion[GET_DR(spi, ni)]) / nni;
}
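// Candidate step size for this species, following the step-size bound of the paper
// cited above:
//   tau_s = min( max(EPSILON*x_s/g_s, 1) / |mu_s| ,
//                max(EPSILON*x_s/g_s, 1)^2 / sigma2_s )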
float m = max(EPSILON * x / g, 1.0f);
float t1 = m / abs(mu);
float t2 = (m * m) / (sigma2);
return min(t1, t2);
}
__device__ float compute_tau_ncr(state state, reactions reactions,
int * hors, bool crit_r[MAXREACTIONS], neigh neigh,
int sbi, rates rates)
{
float min_tau = INFINITY;
for (int spi = 0; spi < SPC; spi++) {
// First of all we need to check if the specie spi is involved
// in a critical event. If it is, skip it.
bool skip_r = false;
// check for critical reaction events
for (int ri = 0; ri < RC; ri++) {
// skip if reaction is critical and the specie is involved
skip_r = skip_r || (crit_r[ri] && (reactions.r[GET_COEFF(spi, ri)] > 0));
}
// check for critical diffusion events
bool skip_d = is_critical_diffusion(state, sbi, spi);
if (skip_r && skip_d) // ??? should be ||
continue;
// spi is not involved in any critical event.
float tau = compute_tau_sp(state, reactions, hors, crit_r, neigh, sbi, spi, rates);
min_tau = min(min_tau, tau);
}
return min_tau;
}
__device__ float compute_tau_cr(state state, bool crit_r[MAXREACTIONS],
int sbi, rates rates, curandStateMRG32k3a * s)
{
float react_rates_sum_cr = 0.0; // sum of the react rates of critical reactions
float diff_rates_sum_cr = 0.0; // sum of diffusion rates of critical diffusion events
for (int ri = 0; ri < RC; ri++) {
react_rates_sum_cr += (rates.reaction[GET_RR(ri, sbi)] * crit_r[ri]);
}
for (int spi = 0; spi < SPC; spi++) {
diff_rates_sum_cr += (rates.diffusion[GET_DR(spi, sbi)] * is_critical_diffusion(state, sbi, spi));
}
if (react_rates_sum_cr == 0.0 && diff_rates_sum_cr == 0.0)
return INFINITY;
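// A single critical event is scheduled like one exact SSA step: with total critical
// propensity a_c = react_rates_sum_cr + diff_rates_sum_cr, the waiting time is
// exponentially distributed and is sampled by inversion, tau_cr = -ln(U) / a_c with
// U uniform in (0, 1].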
float rand = hiprand_uniform(&s[sbi]);
return -logf(rand) / (react_rates_sum_cr + diff_rates_sum_cr);
}
__global__ void compute_taus(state state, reactions reactions, int * hors, neigh neigh,
rates rates, float * tau, float min_tau, char * leap, curandStateMRG32k3a * s)
{
INDCHECK()
// If on the previous step nobody changed our state from SSA_FF to
// SSA, it means that no molecule entered here and we can
// fast-forward time without recomputing anything. Set tau =
// old_tau - min_tau and return.
//
// Check:
// - If min_tau == 0.0 we are setting up the simulation, so pass.
// - If tau[sbi] is +Inf no event can happen here, so pass.
// - If min_tau == tau[sbi], it means that we have the same
// tau as the subvolume with the min_tau, but he was the one
// and we didn't get to act. We can't fast-forward (that
// would bring tau to zero), so just recompute a new tau.
if (leap[sbi] == SSA_FF && !isinf(tau[sbi]) && min_tau > 0.0 && min_tau != tau[sbi]) {
tau[sbi] -= min_tau;
return;
}
// crit_r[ri] == TRUE if ri is critical in this subvolume
bool crit_r[MAXREACTIONS];
for (int ri = 0; ri < RC; ri++) {
crit_r[ri] = is_critical_reaction(state, reactions, sbi, ri);
}
float tau_ncr = compute_tau_ncr(state, reactions, hors, crit_r, neigh, sbi, rates);
float tau_cr = compute_tau_cr(state, crit_r, sbi, rates, s);
// If tau_ncr is +Inf then every reaction is critical, and we
// can't leap. Also prevent leap if tau_ncr is too small.
bool leap_here = true;
if (isinf(tau_ncr) /*|| (tau_ncr < 10.0 / rates.matrix[GET_RATE(2, sbi)])*/) {
// We start with fast-forward enabled. If someone diffuses to
// us, they will need to disable it by setting the state to SSA.
leap[sbi] = SSA_FF;
leap_here = false;
}
if (tau_ncr < tau_cr) {
// no critical event will happen, we'll leap with all the
// non-critical events
tau[sbi] = tau_ncr;
if (leap_here)
leap[sbi] = LEAP_NOCR;
} else {
// a single critical event will happen, all the non-critical
// events will leap with tau = tau_cr
tau[sbi] = tau_cr;
if (leap_here)
leap[sbi] = LEAP_CR;
}
} | 97ca76a5a4c665cddfd2765a4f3fdebff7d8fdb0.cu | #include "../include/cuda/tau.cuh"
__device__ bool is_critical_reaction(state state, reactions reactions, int sbi, int ri)
{
bool crit = false;
int sf = 0; // reactants found
// Instead of looping on every specie, we stop early if we already
// found 2 reactants (since we don't support reactions with 3 or
// more).
for (int spi = 0; sf < 2 && spi < SPC; spi++) {
// get reactant coeff. for specie spi
int rt = reactions.r[GET_COEFF(spi, ri)];
// if rt > 0, we found another reactant of reaction ri
sf += (rt > 0);
// if rt > 0 then specie spi is required. If the current
// subvolume has 0 mols of spi, the reaction is critical.
if(rt > 0 && state.curr[GET_SPI(spi, sbi)] == 0){
return true;
}
// delta is the net variation in population spi after NC
// firings of the reaction
int delta = (reactions.p[GET_COEFF(spi, ri)] - reactions.r[GET_COEFF(spi, ri)]) * NC;
if(delta >= 0) {
// the reaction actually *increases* (or leave unchanged)
// the current specie population, so it's obviously not
// critical
continue;
}
// Now delta < 0 and abs(delta) is the decrease in specie spi
// population caused by the reaction.
// If abs(delta) > population, the reaction is critical.
crit = crit || (abs(delta) > state.curr[GET_SPI(spi, sbi)]);
}
return crit;
}
__device__ bool is_critical_diffusion(state state, int sbi, int spi)
{
return state.curr[GET_SPI(spi, sbi)] < NC;
}
__device__ float compute_g(state state, reactions reactions, int * hors, int sbi, int spi)
{
int hor = hors[spi];
int x; // to shut up compiler warning
switch (hor) {
case 1:
return 1;
case 2:
return 2;
case 3:
x = state.curr[GET_SPI(spi, sbi)];
return 2.0 + 1.0 / (x - 1);
default:
// HOR(spi) == 0 if spi does not appear as reactant in any
// reaction. Return +Inf so that when we divide by g we get 0
// and the procedure takes max(0, 1) = 1 as g.
return INFINITY;
}
}
__device__ float compute_tau_sp(state state, reactions reactions, int * hors,
bool crit_r[MAXREACTIONS], neigh neigh,
int sbi, int spi, rates rates)
{
float g = compute_g(state, reactions, hors, sbi, spi);
int x = state.curr[GET_SPI(spi, sbi)];
// compute mu and sigma2
// compute mu (as defined in Cao, Gillespie, Petzold - Efficient
// step size selection for the tau-leaping simulation method, J
// chem Phys 124, 044109, page 7, formula 32a), for specie spi in
// subvolume sbi
float mu = 0.0;
// compute sigma2 (as defined in Cao, Gillespie, Petzold -
// Efficient step size selection for the tau-leaping simulation
// method, J chem Phys 124, 044109, page 7, formula 32b), for
// specie spi in subvolume sbi
float sigma2 = 0.0;
// sum propensities for the reactions
for (int ri = 0; ri < RC; ri++) {
// when computing mu we only sum over non-critical reactions
if (crit_r[ri]) {
continue;
}
// mu is the sum of (change_vector) * (reaction_rate) over
// non-critical reactions.
//
// sigma2 is the sum of (change_vector)² * (reaction_rate)
// over non-critical reactions.
int v = reactions.p[GET_COEFF(spi, ri)] - reactions.r[GET_COEFF(spi, ri)];
mu += v * rates.reaction[GET_RR(ri, sbi)];
sigma2 += (v * v) * rates.reaction[GET_RR(ri, sbi)];
}
if(is_critical_diffusion(state, sbi, spi)) {
// if spi is critical in this subvolume, don't sum
// propensities of outgoing diffusions
} else {
// Add propensities of outgoing diffusions for specie spi. We
// should sum the diffusion propensities over all the
// neighbours, but diff_rates_array already has the overall
// diffusion propensity.
mu += rates.diffusion[GET_DR(spi, sbi)];
sigma2 += rates.diffusion[GET_DR(spi, sbi)];
}
// add propensities of incoming diffusions for specie spi
for (int i = 0; i < 6; i++) { // loop over the neighbours
unsigned int ni = neigh.index[sbi * 6 + i]; // neighbour index
if(ni == sbi) {
continue;
}
int nni = neigh.count[ni];
// Subtract from mu the propensity of specie spi in subvolume
// ni divided by nni (i.e. we sum a negative value).
mu -= (rates.diffusion[GET_DR(spi, ni)]) / nni;
// Add to sigma2 the propensity of specie spi in subvolume ni
// divided by nni. No need to square since the coeff. is
// always -1, just sum 1.
sigma2 += (rates.diffusion[GET_DR(spi, ni)]) / nni;
}
float m = max(EPSILON * x / g, 1.0f);
float t1 = m / abs(mu);
float t2 = (m * m) / (sigma2);
return min(t1, t2);
}
__device__ float compute_tau_ncr(state state, reactions reactions,
int * hors, bool crit_r[MAXREACTIONS], neigh neigh,
int sbi, rates rates)
{
float min_tau = INFINITY;
for (int spi = 0; spi < SPC; spi++) {
// First of all we need to check if the specie spi is involved
// in a critical event. If it is, skip it.
bool skip_r = false;
// check for critical reaction events
for (int ri = 0; ri < RC; ri++) {
// skip if reaction is critical and the specie is involved
skip_r = skip_r || (crit_r[ri] && (reactions.r[GET_COEFF(spi, ri)] > 0));
}
// check for critical diffusion events
bool skip_d = is_critical_diffusion(state, sbi, spi);
if (skip_r && skip_d) // ??? should be ||
continue;
// spi is not involved in any critical event.
float tau = compute_tau_sp(state, reactions, hors, crit_r, neigh, sbi, spi, rates);
min_tau = min(min_tau, tau);
}
return min_tau;
}
__device__ float compute_tau_cr(state state, bool crit_r[MAXREACTIONS],
int sbi, rates rates, curandStateMRG32k3a * s)
{
float react_rates_sum_cr = 0.0; // sum of the react rates of critical reactions
float diff_rates_sum_cr = 0.0; // sum of diffusion rates of critical diffusion events
for (int ri = 0; ri < RC; ri++) {
react_rates_sum_cr += (rates.reaction[GET_RR(ri, sbi)] * crit_r[ri]);
}
for (int spi = 0; spi < SPC; spi++) {
diff_rates_sum_cr += (rates.diffusion[GET_DR(spi, sbi)] * is_critical_diffusion(state, sbi, spi));
}
if (react_rates_sum_cr == 0.0 && diff_rates_sum_cr == 0.0)
return INFINITY;
float rand = curand_uniform(&s[sbi]);
return -logf(rand) / (react_rates_sum_cr + diff_rates_sum_cr);
}
__global__ void compute_taus(state state, reactions reactions, int * hors, neigh neigh,
rates rates, float * tau, float min_tau, char * leap, curandStateMRG32k3a * s)
{
INDCHECK()
// If on the previous step nobody changed our state from SSA_FF to
// SSA, it means that no molecule entered here and we can
// fast-forward time without recomputing anything. Set tau =
// old_tau - min_tau and return.
//
// Check:
// - If min_tau == 0.0 we are setting up the simulation, so pass.
// - If tau[sbi] is +Inf no event can happen here, so pass.
// - If min_tau == tau[sbi], it means that we have the same
// tau as the subvolume with the min_tau, but he was the one
// and we didn't get to act. We can't fast-forward (that
// would bring tau to zero), so just recompute a new tau.
if (leap[sbi] == SSA_FF && !isinf(tau[sbi]) && min_tau > 0.0 && min_tau != tau[sbi]) {
tau[sbi] -= min_tau;
return;
}
// crit_r[ri] == TRUE if ri is critical in this subvolume
bool crit_r[MAXREACTIONS];
for (int ri = 0; ri < RC; ri++) {
crit_r[ri] = is_critical_reaction(state, reactions, sbi, ri);
}
float tau_ncr = compute_tau_ncr(state, reactions, hors, crit_r, neigh, sbi, rates);
float tau_cr = compute_tau_cr(state, crit_r, sbi, rates, s);
// If tau_ncr is +Inf then every reaction is critical, and we
// can't leap. Also prevent leap if tau_ncr is too small.
bool leap_here = true;
if (isinf(tau_ncr) /*|| (tau_ncr < 10.0 / rates.matrix[GET_RATE(2, sbi)])*/) {
// We start with fast-forward enabled. If someone diffuses to
// us, they will need to disable it by setting the state to SSA.
leap[sbi] = SSA_FF;
leap_here = false;
}
if (tau_ncr < tau_cr) {
// no critical event will happen, we'll leap with all the
// non-critical events
tau[sbi] = tau_ncr;
if (leap_here)
leap[sbi] = LEAP_NOCR;
} else {
// a single critical event will happen, all the non-critical
// events will leap with tau = tau_cr
tau[sbi] = tau_cr;
if (leap_here)
leap[sbi] = LEAP_CR;
}
} |
29df9853dc3b7c159f43983b6256e8d6a64f0505.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2015 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "utils.hpp"
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include "examples/opencl/benchmark_vector/timer.hpp"
#include "config.hpp"
//###########################################################################
//Kernels
//###########################################################################
template<typename T>
__global__ void logn(size_t count, T* in, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = LOG(in[i]);
}
}
template<typename T>
__global__ void expn(size_t count, T* in, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = EXP(in[i]);
}
}
template<typename T>
__global__ void add(size_t count, T* in1, T* in2, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = in1[i] + in2[i];
}
}
template<typename T>
__global__ void dbl(size_t count, T* in, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = 2.0 * in[i];
}
}
template<typename T>
__global__ void mul(size_t count, T* in1, T* in2, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = in1[i] * in2[i];
}
}
//###########################################################################
//Main
//###########################################################################
int main(int argc, char*argv[]) {
if(argc != 2)
{
std::cout << "Usage: " << argv[0] << " #elements" << std::endl;
exit(1);
}
size_t count = atoi(argv[1]);
std::cout << count << " ";
//Timer
double data = 0.;
//Pointer
TYPE* out;
TYPE* out_dev;
TYPE* in1;
TYPE* in1_dev;
TYPE* in2;
TYPE* in2_dev;
timer_start();
//Malloc Host
hipHostMalloc((void**) &out, count * sizeof(TYPE));
hipHostMalloc((void**) &in1, count * sizeof(TYPE));
hipHostMalloc((void**) &in2, count * sizeof(TYPE));
//Malloc Device
hipHostMalloc((void**) &out_dev, count * sizeof(TYPE));
hipHostMalloc((void**) &in1_dev, count * sizeof(TYPE));
hipHostMalloc((void**) &in2_dev, count * sizeof(TYPE));
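//Note: hipHostMalloc allocates page-locked *host* memory, so the *_dev buffers above
//are pinned host allocations rather than device-resident memory. On platforms with
//unified addressing the kernels can typically still dereference them (zero-copy over
//the bus), but the timings then reflect zero-copy access instead of device DRAM
//bandwidth.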
//Initialize the data
fillRandomVector(in1, count);
fillRandomVector(in2, count);
//Copy data to the device
hipMemcpy(in1_dev, in1, count * sizeof(TYPE), hipMemcpyHostToDevice);
hipMemcpy(in2_dev, in2, count * sizeof(TYPE), hipMemcpyHostToDevice);
hipMemcpy(out_dev, in1, count * sizeof(TYPE), hipMemcpyHostToDevice);
data = timer_stop();
//######################################################################
//Launch kernels
//######################################################################
int gridsize = 1;
int blocksize = 32;
// 1. logn kernel
timer_start();
hipLaunchKernelGGL(( logn<TYPE>), dim3(gridsize), dim3(blocksize), 0, 0, count, in1_dev, out_dev);
hipDeviceSynchronize();
hipMemcpy(out, out_dev, count * sizeof(TYPE), hipMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(::log(in1[i]) - out[i]) < EPS))
std::cout << "Error for logn at " << i << std::endl;
}
// 2. expn kernel
timer_start();
hipLaunchKernelGGL(( expn<TYPE>), dim3(gridsize), dim3(blocksize), 0, 0, count, in1_dev, out_dev);
hipDeviceSynchronize();
hipMemcpy(out, out_dev, count * sizeof(TYPE), hipMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(::exp(in1[i]) - out[i]) < EPS))
std::cout << "Error for expn at " << i << std::endl;
}
// 3. dbl kernel
timer_start();
hipLaunchKernelGGL(( dbl<TYPE>), dim3(gridsize), dim3(blocksize), 0, 0, count, in1_dev, out_dev);
hipDeviceSynchronize();
hipMemcpy(out, out_dev, count * sizeof(TYPE), hipMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(2.0 * in1[i] - out[i]) < EPS))
std::cout << "Error for dbl at " << i << std::endl;
}
// 4. add kernel
timer_start();
hipLaunchKernelGGL(( add<TYPE>), dim3(gridsize), dim3(blocksize), 0, 0, count, in1_dev, in2_dev, out_dev);
hipDeviceSynchronize();
hipMemcpy(out, out_dev, count * sizeof(TYPE), hipMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(in1[i] + in2[i] - out[i]) < EPS))
std::cout << "Error for add at " << i << std::endl;
}
// 5. mul kernel
timer_start();
hipLaunchKernelGGL(( mul<TYPE>), dim3(gridsize), dim3(blocksize), 0, 0, count, in1_dev, in2_dev, out_dev);
hipDeviceSynchronize();
hipMemcpy(out, out_dev, count * sizeof(TYPE), hipMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(in1[i] * in2[i] - out[i]) < EPS))
std::cout << "Error for mul at " << i << std::endl;
}
//######################################################################
//Clean
//######################################################################
timer_start();
hipHostFree(in1);
hipHostFree(in2);
hipHostFree(out);
data += timer_stop();
std::cout << data << std::endl;
return EXIT_SUCCESS;
}
| 29df9853dc3b7c159f43983b6256e8d6a64f0505.cu | // Copyright (c) 2015 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "utils.hpp"
#include <cuda.h>
#include <iostream>
#include <cmath>
#include "examples/opencl/benchmark_vector/timer.hpp"
#include "config.hpp"
//###########################################################################
//Kernels
//###########################################################################
template<typename T>
__global__ void logn(size_t count, T* in, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = LOG(in[i]);
}
}
template<typename T>
__global__ void expn(size_t count, T* in, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = EXP(in[i]);
}
}
template<typename T>
__global__ void add(size_t count, T* in1, T* in2, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = in1[i] + in2[i];
}
}
template<typename T>
__global__ void dbl(size_t count, T* in, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = 2.0 * in[i];
}
}
template<typename T>
__global__ void mul(size_t count, T* in1, T* in2, T* out) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count;
i += gridDim.x * blockDim.x) {
out[i] = in1[i] * in2[i];
}
}
//###########################################################################
//Main
//###########################################################################
int main(int argc, char*argv[]) {
if(argc != 2)
{
std::cout << "Usage: " << argv[0] << " #elements" << std::endl;
exit(1);
}
size_t count = atoi(argv[1]);
std::cout << count << " ";
//Timer
double data = 0.;
//Pointer
TYPE* out;
TYPE* out_dev;
TYPE* in1;
TYPE* in1_dev;
TYPE* in2;
TYPE* in2_dev;
timer_start();
//Malloc Host
cudaMallocHost((void**) &out, count * sizeof(TYPE));
cudaMallocHost((void**) &in1, count * sizeof(TYPE));
cudaMallocHost((void**) &in2, count * sizeof(TYPE));
//Malloc Device
cudaMallocHost((void**) &out_dev, count * sizeof(TYPE));
cudaMallocHost((void**) &in1_dev, count * sizeof(TYPE));
cudaMallocHost((void**) &in2_dev, count * sizeof(TYPE));
//Initialize the data
fillRandomVector(in1, count);
fillRandomVector(in2, count);
//Copy data to the device
cudaMemcpy(in1_dev, in1, count * sizeof(TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(in2_dev, in2, count * sizeof(TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(out_dev, in1, count * sizeof(TYPE), cudaMemcpyHostToDevice);
data = timer_stop();
//######################################################################
//Launch kernels
//######################################################################
int gridsize = 1;
int blocksize = 32;
// 1. logn kernel
timer_start();
logn<TYPE><<<gridsize, blocksize>>>(count, in1_dev, out_dev);
cudaDeviceSynchronize();
cudaMemcpy(out, out_dev, count * sizeof(TYPE), cudaMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(std::log(in1[i]) - out[i]) < EPS))
std::cout << "Error for logn at " << i << std::endl;
}
// 2. expn kernel
timer_start();
expn<TYPE><<<gridsize, blocksize>>>(count, in1_dev, out_dev);
cudaDeviceSynchronize();
cudaMemcpy(out, out_dev, count * sizeof(TYPE), cudaMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(std::exp(in1[i]) - out[i]) < EPS))
std::cout << "Error for expn at " << i << std::endl;
}
// 3. dbl kernel
timer_start();
dbl<TYPE><<<gridsize, blocksize>>>(count, in1_dev, out_dev);
cudaDeviceSynchronize();
cudaMemcpy(out, out_dev, count * sizeof(TYPE), cudaMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(2.0 * in1[i] - out[i]) < EPS))
std::cout << "Error for dbl at " << i << std::endl;
}
// 4. add kernel
timer_start();
add<TYPE><<<gridsize, blocksize>>>(count, in1_dev, in2_dev, out_dev);
cudaDeviceSynchronize();
cudaMemcpy(out, out_dev, count * sizeof(TYPE), cudaMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(in1[i] + in2[i] - out[i]) < EPS))
std::cout << "Error for add at " << i << std::endl;
}
// 5. mul kernel
timer_start();
mul<TYPE><<<gridsize, blocksize>>>(count, in1_dev, in2_dev, out_dev);
cudaDeviceSynchronize();
cudaMemcpy(out, out_dev, count * sizeof(TYPE), cudaMemcpyDeviceToHost);
std::cout << timer_stop() << " ";
for (size_t i = 0; i < count; i++) {
if (!(std::abs(in1[i] * in2[i] - out[i]) < EPS))
std::cout << "Error for mul at " << i << std::endl;
}
//######################################################################
//Clean
//######################################################################
timer_start();
cudaFreeHost(in1);
cudaFreeHost(in2);
cudaFreeHost(out);
data += timer_stop();
std::cout << data << std::endl;
return EXIT_SUCCESS;
}
|
4cb18e370f94876316926fcbc3570885a6f37dac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols)
{
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid * cols;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
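// The kernel computes a numerically stable softmax over one row of softMaxP:
// (1) block-wide max reduction of the biased inputs, (2) subtract the max,
// exponentiate and block-wide sum reduction, (3) divide by the sum, i.e.
//   sp[i] = exp(x_i - max_j x_j) / sum_k exp(x_k - max_j x_j),
// which equals exp(x_i)/sum_k exp(x_k) but avoids overflow in __expf.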
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] += b[id];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[id]);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] -= _max[0];
sp[id] = __expf(sp[id]);
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] /= _sum[0];
}
}
} | 4cb18e370f94876316926fcbc3570885a6f37dac.cu | #include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols)
{
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid * cols;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] += b[id];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[id]);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] -= _max[0];
sp[id] = __expf(sp[id]);
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] /= _sum[0];
}
}
} |
c3ecf707b94dcb0c15f7191d47423a7036e9af1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <iostream>
#include <ctime>
#include "helper_math.h" // overload operators for floatN
#include "helper_cuda.h"
// thrust
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/quaternion.hpp>
#include "rigidbody.h"
#include "collision.h"
namespace CUDA
{
RigidBody* body_ptr;
int* grid_ptr;
int numberOfBodies;
int numberOfPlanes;
void initRigidBodies(RigidBody* host_bodies, int size, int planeCount)
{
hipMalloc(&body_ptr, sizeof(RigidBody) * size);
hipMemcpy(body_ptr, host_bodies, sizeof(RigidBody) * size, hipMemcpyHostToDevice);
numberOfBodies = size;
numberOfPlanes = planeCount;
// init grid
int gridSize = numberOfBodies * (numberOfBodies + numberOfPlanes);
hipMalloc(&grid_ptr, sizeof(int) * gridSize);
}
void shutdownRigidBodies()
{
hipFree(body_ptr);
hipFree(grid_ptr);
}
__global__ void getPosAndRot(RigidBody* bodies, vec3_t* pos_ptr, quat_t* rot_ptr, int numberOfBodies)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfBodies)
{
RigidBody& rb = bodies[tid];
pos_ptr[tid] = rb.position;
rot_ptr[tid] = rb.rotation;
}
}
void getOrientationData(std::vector<glm::vec3>& pos, std::vector<glm::quat>& rot)
{
int threadsPerBlock = 128;
int blocks = numberOfBodies / threadsPerBlock + 1;
vec3_t* pos_ptr;
quat_t* rot_ptr;
hipMalloc(&pos_ptr, sizeof(vec3_t) * numberOfBodies);
hipMalloc(&rot_ptr, sizeof(quat_t) * numberOfBodies);
hipLaunchKernelGGL(( getPosAndRot), dim3(blocks), dim3(threadsPerBlock), 0, 0, body_ptr, pos_ptr, rot_ptr, numberOfBodies);
hipMemcpy(&pos[0], pos_ptr, sizeof(vec3_t) * numberOfBodies, hipMemcpyDeviceToHost);
hipMemcpy(&rot[0], rot_ptr, sizeof(quat_t) * numberOfBodies, hipMemcpyDeviceToHost);
hipFree(pos_ptr);
hipFree(rot_ptr);
}
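// quatToRot3 converts a quaternion q = (w, x, y, z) into a 3x3 rotation matrix.
// For reference (assuming q is normalized), the expected matrix is
//   | 1-2(y^2+z^2)   2(xy-wz)       2(xz+wy)     |
//   | 2(xy+wz)       1-2(x^2+z^2)   2(yz-wx)     |
//   | 2(xz-wy)       2(yz+wx)       1-2(x^2+y^2) |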
__host__ __device__ void quatToRot3(quat_t& quat, float rot[3][3])
{
rot[0][0] = 1.f - 2.f * quat.y * quat.y - 2.f * quat.z * quat.z;
rot[0][1] = 2.f * quat.x * quat.y - 2.f * quat.w * quat.z;
rot[0][2] = 2.f * quat.x * quat.z + 2.f * quat.w * quat.y;
rot[1][0] = 2.f * quat.x * quat.y + 2.f * quat.w * quat.z;
rot[1][1] = 1.f - 2.f * quat.x * quat.x - 2.f * quat.z * quat.z;
rot[1][2] = 2.f * quat.y * quat.z - 2.f * quat.w * quat.x;
rot[2][0] = 2.f * quat.x * quat.z - 2.f * quat.w * quat.y;
rot[2][1] = 2.f * quat.y * quat.z + 2.f * quat.w * quat.x;
rot[2][2] = 1.f - 2.f * quat.x * quat.x - 2.f * quat.y * quat.y;
}
__host__ __device__ void transposeMatrix(float in[3][3], float out[3][3])
{
for (int y = 0; y < 3; ++y)
{
for (int x = 0; x < 3; ++x)
{
out[x][y] = in[y][x];
}
}
}
__host__ __device__ void matTimesMat(float l[3][3], float r[3][3], float out[3][3])
{
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.f;
for (int k = 0; k < 3; ++k)
{
sum += l[row][k] * r[k][col];
}
out[row][col] = sum;
}
}
}
__host__ __device__ void matTimesScalar(float m[3][3], float s)
{
for (int y = 0; y < 3; ++y)
{
for (int x = 0; x < 3; ++x)
{
m[y][x] *= s;
}
}
}
__host__ __device__ void matTimesVec(float m[3][3], vec3_t& in, vec3_t& out)
{
out.x = m[0][0] * in.x + m[0][1] * in.y + m[0][2] * in.z;
out.y = m[1][0] * in.x + m[1][1] * in.y + m[1][2] * in.z;
out.z = m[2][0] * in.x + m[2][1] * in.y + m[2][2] * in.z;
}
__host__ __device__ void quatTimesQuat(quat_t& l, quat_t& r, quat_t& out)
{
float3 v0 = make_float3(l.x, l.y, l.z);
float3 v1 = make_float3(r.x, r.y, r.z);
out.w = l.w * r.w - dot(v0, v1);
float3 v = l.w * v1 + r.w * v0 + cross(v0, v1);
out.x = v.x;
out.y = v.y;
out.z = v.z;
}
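// Per-body integration step used below: the position advances with the mass-scaled
// linear velocity, then the angular state is updated as
//   I^-1(t)   = R * I_body^-1 * R^T              (world-space inverse inertia, "equation 6")
//   omega     = I^-1(t) * L                      (angular velocity from momentum, "equation 5")
//   q(t + dt) = dq(normalize(omega), |omega*dt|) * q(t)   ("equations 7 and 8")
// matching the numbered equations referenced in the comments inside the kernel.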
__global__ void updateBodies(RigidBody* bodies, int numberOfBodies, float dt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfBodies)
{
RigidBody& rb = bodies[tid];
rb.position += (rb.linearVelocity / rb.mass) * dt;
rb.linearVelocity += dt * make_float3(0.0, -1.5f, 0.0f); // gravity
if (length(rb.angularMomentum) == 0.f)
return;
float rot[3][3];
quatToRot3(rb.rotation, rot);
float invRot[3][3];
transposeMatrix(rot, invRot);
// equation 6
float curInertia[3][3];
float tmp[3][3];
matTimesMat(rot, rb.invInertia, tmp);
matTimesMat(tmp, invRot, curInertia);
// equation 5
matTimesVec(curInertia, rb.angularMomentum, rb.angularVelocity);
// equation 7
float3 rotationAxis = normalize(rb.angularVelocity);
float rotationAngle = length(rb.angularVelocity * dt);
quaternion<float> dq(rotationAxis, rotationAngle);
// equation 8
quaternion<float> newRot = dq * rb.rotation;
rb.rotation = newRot;
}
}
__device__ inline void incrementGrid(int* grid, int width, int ownID, int otherID)
{
int index = width * ownID + otherID;
atomicAdd(&grid[index], 1);
}
__device__ inline void getAbsPositionAndVelocity(RigidBody& rb, Sphere& sphere, float3& pos, float3& vel)
{
float3 abs_pos;
float rot[3][3];
quatToRot3(rb.rotation, rot);
matTimesVec(rot, sphere.position, abs_pos);
float3 ang = cross(rb.angularVelocity, abs_pos);
float3 lin = rb.linearVelocity;
vel = ang + lin;
pos = abs_pos + rb.position;
}
__global__ void collisionDetection(RigidBody* bodies, int numberOfBodies, Sphere* spheres, int numberOfSpheres, Plane* planes, int numberOfPlanes, int* grid)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfSpheres)
{
int rb_ID = 0;
int gridWidth = numberOfBodies + numberOfPlanes;
uint sphereSum = 0;
for (int r = 0; r < numberOfBodies; ++r)
{
sphereSum += bodies[r].numberOfSpheres;
if (tid < sphereSum)
{
// found corresponding body
rb_ID = r;
break;
}
}
Sphere& sphere = spheres[tid];
RigidBody& rb = bodies[rb_ID];
sphere.force = make_float3(0.0);
// absolute position & velocity
float3 abs_pos, abs_vel;
getAbsPositionAndVelocity(rb, sphere, abs_pos, abs_vel);
Sphere s = sphere;
s.position = abs_pos;
s.velocity = abs_vel;
sphere.sphereCollider = -1;
sphere.planeCollider = -1;
// PLANE COLLISION
for (int p = 0; p < numberOfPlanes; ++p)
{
float penetration = collideSpherePlane(s, planes[p]);
if (penetration != -1.0f)
{
incrementGrid(grid, gridWidth, rb_ID, numberOfBodies + p);
sphere.planeCollider = p;
rb.linearVelocity = make_float3(0.f);
rb.angularMomentum = make_float3(0.f);
}
}
// SPHERE COLLISION - brute force for now
for (int s = 0; s < numberOfSpheres; ++s)
{
}
}
}
__device__ inline void atomicAddAngularMomentum(RigidBody& rb, const float3& t)
{
atomicAdd(&rb.angularMomentum.x, t.x);
atomicAdd(&rb.angularMomentum.y, t.y);
atomicAdd(&rb.angularMomentum.z, t.z);
}
__device__ inline void atomicAddLinearVelocity(RigidBody& rb, const float3& l)
{
atomicAdd(&rb.linearVelocity.x, l.x);
atomicAdd(&rb.linearVelocity.y, l.y);
atomicAdd(&rb.linearVelocity.z, l.z);
}
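// The response below applies a simple impulse model per contact sphere: against a
// plane with normal n, the normal impulse magnitude is j = -(1 + epsilon) * m * v_n
// (epsilon = restitution, v_n = normal component of the sphere velocity, m = body
// mass split over the colliding spheres), and a Coulomb-style term -mu * j * t
// (t = unit tangential velocity) is subtracted when the tangential speed is non-zero.
// The per-sphere impulses are then reduced into the body's linear velocity and
// angular momentum using atomics.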
__global__ void collisionResponse(RigidBody* bodies, int numberOfBodies, Sphere* spheres, int numberOfSpheres, Plane* planes, int numberOfPlanes, int* grid, float dt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfSpheres)
{
int rb_ID = 0;
int gridWidth = numberOfBodies + numberOfPlanes;
uint sphereSum = 0;
for (int r = 0; r < numberOfBodies; ++r)
{
sphereSum += bodies[r].numberOfSpheres;
if (tid < sphereSum)
{
rb_ID = r;
break;
}
}
Sphere& sphere = spheres[tid];
RigidBody& rb = bodies[rb_ID];
// absolute position & velocity
float3 abs_pos, abs_vel;
getAbsPositionAndVelocity(rb, sphere, abs_pos, abs_vel);
if (sphere.planeCollider != -1)
{
// PLANE RESPONSE
Plane& plane = planes[sphere.planeCollider];
int numberOfCollisions = grid[rb_ID * gridWidth + numberOfBodies + sphere.planeCollider];
if (numberOfCollisions != 0)
{
float mass = rb.mass / numberOfCollisions;
float vNormal = -length(dot(abs_vel,plane.normal) * plane.normal);
float epsilon = 0.1;
float j = -(1+epsilon) * mass *vNormal;
float3 J = j * plane.normal;
float l = length(abs_vel - (vNormal * plane.normal));
if (l > 0){
//friction
float mu = 0.1;
float3 frictionTerm = (abs_vel - (vNormal * plane.normal)) / l;
J -= mu * j * frictionTerm;
}
const float deltaT = 1.0f;
float3 momentum = J*deltaT;
float3 toAdd = momentum ;
// particle.velocity += toAdd;
sphere.force += toAdd;
}
}
if (sphere.sphereCollider != -1)
{
// SPHERE RESPONSE
}
float3 linearForce = sphere.force;
float3 torque = 0.1*cross(sphere.position, linearForce);
//TODO use reduce
atomicAddLinearVelocity(rb, linearForce);
atomicAddAngularMomentum(rb, torque);
}
}
__global__ void clearGrid(int* grid, int sizeOfGrid)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < sizeOfGrid)
{
grid[tid] = 0;
}
}
void printVec(vec3_t& v)
{
std::cout << "Vector: " << v.x << " " << v.y << " " << v.z << std::endl;
}
void printQuat(quat_t& q)
{
std::cout << "Quaternion: " << q.x << " " << q.y << " " << q.z << " " << q.w << std::endl;
}
void printMat(float m[3][3])
{
std::cout << "Matrix: " << std::endl;
for (int y = 0; y < 3; ++y)
{
for (int x = 0; x < 3; ++x)
{
std::cout << m[y][x] << " ";
}
std::cout << std::endl;
}
}
void printDelim()
{
std::cout << "-----------" << std::endl;
}
void updateRigidBodies(Sphere* spheres, int numberOfSpheres, Plane* planes, int numberOfPlanes, float dt)
{
int threadsPerBlock = 128;
int blocks = numberOfBodies / threadsPerBlock + 1;
hipLaunchKernelGGL(( updateBodies), dim3(blocks), dim3(threadsPerBlock), 0, 0, body_ptr, numberOfBodies, dt);
blocks = (numberOfBodies * (numberOfBodies + numberOfPlanes)) / threadsPerBlock + 1;
hipLaunchKernelGGL(( clearGrid), dim3(blocks), dim3(threadsPerBlock), 0, 0, grid_ptr, numberOfBodies * (numberOfBodies + numberOfPlanes));
blocks = numberOfSpheres / threadsPerBlock + 1;
hipLaunchKernelGGL(( collisionDetection), dim3(blocks), dim3(threadsPerBlock), 0, 0, body_ptr, numberOfBodies, spheres, numberOfSpheres, planes, numberOfPlanes, grid_ptr);
hipLaunchKernelGGL(( collisionResponse), dim3(blocks), dim3(threadsPerBlock), 0, 0, body_ptr, numberOfBodies, spheres, numberOfSpheres, planes, numberOfPlanes, grid_ptr, dt);
}
}
| c3ecf707b94dcb0c15f7191d47423a7036e9af1c.cu | #include <cstdlib>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "helper_math.h" // overload operators for floatN
#include "helper_cuda.h"
// thrust
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/quaternion.hpp>
#include "rigidbody.h"
#include "collision.h"
namespace CUDA
{
RigidBody* body_ptr;
int* grid_ptr;
int numberOfBodies;
int numberOfPlanes;
void initRigidBodies(RigidBody* host_bodies, int size, int planeCount)
{
cudaMalloc(&body_ptr, sizeof(RigidBody) * size);
cudaMemcpy(body_ptr, host_bodies, sizeof(RigidBody) * size, cudaMemcpyHostToDevice);
numberOfBodies = size;
numberOfPlanes = planeCount;
// init grid
int gridSize = numberOfBodies * (numberOfBodies + numberOfPlanes);
cudaMalloc(&grid_ptr, sizeof(int) * gridSize);
}
void shutdownRigidBodies()
{
cudaFree(body_ptr);
cudaFree(grid_ptr);
}
__global__ void getPosAndRot(RigidBody* bodies, vec3_t* pos_ptr, quat_t* rot_ptr, int numberOfBodies)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfBodies)
{
RigidBody& rb = bodies[tid];
pos_ptr[tid] = rb.position;
rot_ptr[tid] = rb.rotation;
}
}
void getOrientationData(std::vector<glm::vec3>& pos, std::vector<glm::quat>& rot)
{
int threadsPerBlock = 128;
int blocks = numberOfBodies / threadsPerBlock + 1;
vec3_t* pos_ptr;
quat_t* rot_ptr;
cudaMalloc(&pos_ptr, sizeof(vec3_t) * numberOfBodies);
cudaMalloc(&rot_ptr, sizeof(quat_t) * numberOfBodies);
getPosAndRot<<<blocks, threadsPerBlock>>>(body_ptr, pos_ptr, rot_ptr, numberOfBodies);
cudaMemcpy(&pos[0], pos_ptr, sizeof(vec3_t) * numberOfBodies, cudaMemcpyDeviceToHost);
cudaMemcpy(&rot[0], rot_ptr, sizeof(quat_t) * numberOfBodies, cudaMemcpyDeviceToHost);
cudaFree(pos_ptr);
cudaFree(rot_ptr);
}
__host__ __device__ void quatToRot3(quat_t& quat, float rot[3][3])
{
rot[0][0] = 1.f - 2.f * quat.y * quat.y - 2.f * quat.z * quat.z;
rot[0][1] = 2.f * quat.x * quat.y - 2.f * quat.w * quat.z;
rot[0][2] = 2.f * quat.x * quat.z + 2.f * quat.w * quat.y;
rot[1][0] = 2.f * quat.x * quat.y + 2.f * quat.w * quat.z;
rot[1][1] = 1.f - 2.f * quat.x * quat.x - 2.f * quat.z * quat.z;
rot[1][2] = 2.f * quat.y * quat.z - 2.f * quat.w * quat.x;
rot[2][0] = 2.f * quat.x * quat.z - 2.f * quat.w * quat.y;
rot[2][1] = 2.f * quat.y * quat.z + 2.f * quat.w * quat.x;
rot[2][2] = 1.f - 2.f * quat.x * quat.x - 2.f * quat.y * quat.y;
}
__host__ __device__ void transposeMatrix(float in[3][3], float out[3][3])
{
for (int y = 0; y < 3; ++y)
{
for (int x = 0; x < 3; ++x)
{
out[x][y] = in[y][x];
}
}
}
__host__ __device__ void matTimesMat(float l[3][3], float r[3][3], float out[3][3])
{
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.f;
for (int k = 0; k < 3; ++k)
{
sum += l[row][k] * r[k][col];
}
out[row][col] = sum;
}
}
}
__host__ __device__ void matTimesScalar(float m[3][3], float s)
{
for (int y = 0; y < 3; ++y)
{
for (int x = 0; x < 3; ++x)
{
m[y][x] *= s;
}
}
}
__host__ __device__ void matTimesVec(float m[3][3], vec3_t& in, vec3_t& out)
{
out.x = m[0][0] * in.x + m[0][1] * in.y + m[0][2] * in.z;
out.y = m[1][0] * in.x + m[1][1] * in.y + m[1][2] * in.z;
out.z = m[2][0] * in.x + m[2][1] * in.y + m[2][2] * in.z;
}
__host__ __device__ void quatTimesQuat(quat_t& l, quat_t& r, quat_t& out)
{
float3 v0 = make_float3(l.x, l.y, l.z);
float3 v1 = make_float3(r.x, r.y, r.z);
out.w = l.w * r.w - dot(v0, v1);
float3 v = l.w * v1 + r.w * v0 + cross(v0, v1);
out.x = v.x;
out.y = v.y;
out.z = v.z;
}
__global__ void updateBodies(RigidBody* bodies, int numberOfBodies, float dt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfBodies)
{
RigidBody& rb = bodies[tid];
rb.position += (rb.linearVelocity / rb.mass) * dt;
rb.linearVelocity += dt * make_float3(0.0, -1.5f, 0.0f); // gravity
if (length(rb.angularMomentum) == 0.f)
return;
float rot[3][3];
quatToRot3(rb.rotation, rot);
float invRot[3][3];
transposeMatrix(rot, invRot);
// equation 6
float curInertia[3][3];
float tmp[3][3];
matTimesMat(rot, rb.invInertia, tmp);
matTimesMat(tmp, invRot, curInertia);
// equation 5
matTimesVec(curInertia, rb.angularMomentum, rb.angularVelocity);
// equation 7
float3 rotationAxis = normalize(rb.angularVelocity);
float rotationAngle = length(rb.angularVelocity * dt);
quaternion<float> dq(rotationAxis, rotationAngle);
// equation 8
quaternion<float> newRot = dq * rb.rotation;
rb.rotation = newRot;
}
}
__device__ inline void incrementGrid(int* grid, int width, int ownID, int otherID)
{
int index = width * ownID + otherID;
atomicAdd(&grid[index], 1);
}
__device__ inline void getAbsPositionAndVelocity(RigidBody& rb, Sphere& sphere, float3& pos, float3& vel)
{
float3 abs_pos;
float rot[3][3];
quatToRot3(rb.rotation, rot);
matTimesVec(rot, sphere.position, abs_pos);
float3 ang = cross(rb.angularVelocity, abs_pos);
float3 lin = rb.linearVelocity;
vel = ang + lin;
pos = abs_pos + rb.position;
}
__global__ void collisionDetection(RigidBody* bodies, int numberOfBodies, Sphere* spheres, int numberOfSpheres, Plane* planes, int numberOfPlanes, int* grid)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfSpheres)
{
int rb_ID = 0;
int gridWidth = numberOfBodies + numberOfPlanes;
uint sphereSum = 0;
for (int r = 0; r < numberOfBodies; ++r)
{
sphereSum += bodies[r].numberOfSpheres;
if (tid < sphereSum)
{
// found corresponding body
rb_ID = r;
break;
}
}
Sphere& sphere = spheres[tid];
RigidBody& rb = bodies[rb_ID];
sphere.force = make_float3(0.0);
// absolute position & velocity
float3 abs_pos, abs_vel;
getAbsPositionAndVelocity(rb, sphere, abs_pos, abs_vel);
Sphere s = sphere;
s.position = abs_pos;
s.velocity = abs_vel;
sphere.sphereCollider = -1;
sphere.planeCollider = -1;
// PLANE COLLISION
for (int p = 0; p < numberOfPlanes; ++p)
{
float penetration = collideSpherePlane(s, planes[p]);
if (penetration != -1.0f)
{
incrementGrid(grid, gridWidth, rb_ID, numberOfBodies + p);
sphere.planeCollider = p;
rb.linearVelocity = make_float3(0.f);
rb.angularMomentum = make_float3(0.f);
}
}
// SPHERE COLLISION - brute force for now
for (int s = 0; s < numberOfSpheres; ++s)
{
}
}
}
__device__ inline void atomicAddAngularMomentum(RigidBody& rb, const float3& t)
{
atomicAdd(&rb.angularMomentum.x, t.x);
atomicAdd(&rb.angularMomentum.y, t.y);
atomicAdd(&rb.angularMomentum.z, t.z);
}
__device__ inline void atomicAddLinearVelocity(RigidBody& rb, const float3& l)
{
atomicAdd(&rb.linearVelocity.x, l.x);
atomicAdd(&rb.linearVelocity.y, l.y);
atomicAdd(&rb.linearVelocity.z, l.z);
}
__global__ void collisionResponse(RigidBody* bodies, int numberOfBodies, Sphere* spheres, int numberOfSpheres, Plane* planes, int numberOfPlanes, int* grid, float dt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numberOfSpheres)
{
int rb_ID = 0;
int gridWidth = numberOfBodies + numberOfPlanes;
uint sphereSum = 0;
for (int r = 0; r < numberOfBodies; ++r)
{
sphereSum += bodies[r].numberOfSpheres;
if (tid < sphereSum)
{
rb_ID = r;
break;
}
}
Sphere& sphere = spheres[tid];
RigidBody& rb = bodies[rb_ID];
// absolute position & velocity
float3 abs_pos, abs_vel;
getAbsPositionAndVelocity(rb, sphere, abs_pos, abs_vel);
if (sphere.planeCollider != -1)
{
// PLANE RESPONSE
Plane& plane = planes[sphere.planeCollider];
int numberOfCollisions = grid[rb_ID * gridWidth + numberOfBodies + sphere.planeCollider];
if (numberOfCollisions != 0)
{
float mass = rb.mass / numberOfCollisions;
float vNormal = -length(dot(abs_vel,plane.normal) * plane.normal);
				float epsilon = 0.1f;
				float j = -(1 + epsilon) * mass * vNormal;
float3 J = j * plane.normal;
float l = length(abs_vel - (vNormal * plane.normal));
if (l > 0){
//friction
float mu = 0.1;
float3 frictionTerm = (abs_vel - (vNormal * plane.normal)) / l;
J -= mu * j * frictionTerm;
}
const float deltaT = 1.0f;
float3 momentum = J*deltaT;
float3 toAdd = momentum ;
// particle.velocity += toAdd;
sphere.force += toAdd;
}
}
if (sphere.sphereCollider != -1)
{
// SPHERE RESPONSE
}
float3 linearForce = sphere.force;
float3 torque = 0.1*cross(sphere.position, linearForce);
//TODO use reduce
atomicAddLinearVelocity(rb, linearForce);
atomicAddAngularMomentum(rb, torque);
}
}
__global__ void clearGrid(int* grid, int sizeOfGrid)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < sizeOfGrid)
{
grid[tid] = 0;
}
}
void printVec(vec3_t& v)
{
std::cout << "Vector: " << v.x << " " << v.y << " " << v.z << std::endl;
}
void printQuat(quat_t& q)
{
std::cout << "Quaternion: " << q.x << " " << q.y << " " << q.z << " " << q.w << std::endl;
}
void printMat(float m[3][3])
{
std::cout << "Matrix: " << std::endl;
for (int y = 0; y < 3; ++y)
{
for (int x = 0; x < 3; ++x)
{
std::cout << m[y][x] << " ";
}
std::cout << std::endl;
}
}
void printDelim()
{
std::cout << "-----------" << std::endl;
}
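/*
 * updateRigidBodies is the per-frame host entry point: integrate the bodies, clear the
 * collision-count grid, detect sphere/plane contacts, then apply the responses.
 * numberOfBodies, body_ptr and grid_ptr are not parameters, so they are presumably
 * file-scope globals set up earlier in this translation unit.
 */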
void updateRigidBodies(Sphere* spheres, int numberOfSpheres, Plane* planes, int numberOfPlanes, float dt)
{
int threadsPerBlock = 128;
int blocks = numberOfBodies / threadsPerBlock + 1;
updateBodies<<<blocks, threadsPerBlock>>>(body_ptr, numberOfBodies, dt);
blocks = (numberOfBodies * (numberOfBodies + numberOfPlanes)) / threadsPerBlock + 1;
clearGrid<<<blocks, threadsPerBlock>>>(grid_ptr, numberOfBodies * (numberOfBodies + numberOfPlanes));
blocks = numberOfSpheres / threadsPerBlock + 1;
collisionDetection<<<blocks, threadsPerBlock>>>(body_ptr, numberOfBodies, spheres, numberOfSpheres, planes, numberOfPlanes, grid_ptr);
collisionResponse<<<blocks, threadsPerBlock>>>(body_ptr, numberOfBodies, spheres, numberOfSpheres, planes, numberOfPlanes, grid_ptr, dt);
}
}
|
83d035be34fd800c1ac64ea5a905b93ff1232db1.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"
__global__ void
my_bpnn_layerforward_CUDA( float *input_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
  /* This has been changed to enable memory coalescing */
int index_in = HEIGHT * by + tx + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
  /* This has been changed to enable memory coalescing */
if ( ty == 0 ) {
input_node[tx] = input_cuda[index_in] ;
}
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index]; // load weights
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
/*
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++) {
int power_two = __powf(2, i);
if( ty % power_two == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
__syncthreads();
}
*/
if( ty == 0) {
for( int i = 1; i < HEIGHT; i++) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[i][tx];
}
}
  /* Do we need the following assignment? */
//input_hidden_cuda[index] = weight_matrix[ty][tx];
__syncthreads();
  /* This has been changed to enable memory coalescing */
if ( ty == 0 ) {
hidden_partial_sum[by * hid + tx] = weight_matrix[ty][tx];
}
}
__global__ void
bpnn_layerforward_CUDA( float *input_cuda,
                        float *input_hidden_cuda, // this is actually the weights
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 ) {
input_node[ty] = input_cuda[index_in] ;
}
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index]; // load weights
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++) {
int power_two = __powf(2, i);
if( ty % power_two == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
__syncthreads();
}
  /* Do we need the following assignment? */
//input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
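/*
 * Weight-update kernels: standard backpropagation with momentum,
 *   w += ETA * delta[x] * ly[y] + MOMENTUM * oldw,
 * with ETA and MOMENTUM defined in backprop.h. The my_ variant stages the layer
 * activations in shared memory (tmp[]) so each value is fetched from global memory only
 * once per block; threads with ty == 0 in the by == 0 blocks also update the bias row.
 */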
__global__ void
my_bpnn_adjust_weights_cuda( float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_x = tx + 1;
__shared__ float tmp[WIDTH];
int shr_idx = HEIGHT * by + tx + 1;
if ( ty == 0 ) {
tmp[tx] = ly[shr_idx];
}
__syncthreads();
w[index] += ((ETA * delta[index_x] * tmp[ty]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * tmp[ty]) + (MOMENTUM * oldw[index]));
//__syncthreads();
if (ty == 0 && by ==0) {
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
__global__ void
bpnn_adjust_weights_cuda( float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0) {
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
| 83d035be34fd800c1ac64ea5a905b93ff1232db1.cu |
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"
__global__ void
my_bpnn_layerforward_CUDA( float *input_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
  /* This has been changed to enable memory coalescing */
int index_in = HEIGHT * by + tx + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
  /* This has been changed to enable memory coalescing */
if ( ty == 0 ) {
input_node[tx] = input_cuda[index_in] ;
}
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index]; // load weights
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
/*
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++) {
int power_two = __powf(2, i);
if( ty % power_two == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
__syncthreads();
}
*/
if( ty == 0) {
for( int i = 1; i < HEIGHT; i++) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[i][tx];
}
}
  /* Do we need the following assignment? */
//input_hidden_cuda[index] = weight_matrix[ty][tx];
__syncthreads();
  /* This has been changed to enable memory coalescing */
if ( ty == 0 ) {
hidden_partial_sum[by * hid + tx] = weight_matrix[ty][tx];
}
}
__global__ void
bpnn_layerforward_CUDA( float *input_cuda,
                        float *input_hidden_cuda, // this is actually the weights
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 ) {
input_node[ty] = input_cuda[index_in] ;
}
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index]; // load weights
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++) {
int power_two = __powf(2, i);
if( ty % power_two == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
__syncthreads();
}
  /* Do we need the following assignment? */
//input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
__global__ void
my_bpnn_adjust_weights_cuda( float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_x = tx + 1;
__shared__ float tmp[WIDTH];
int shr_idx = HEIGHT * by + tx + 1;
if ( ty == 0 ) {
tmp[tx] = ly[shr_idx];
}
__syncthreads();
w[index] += ((ETA * delta[index_x] * tmp[ty]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * tmp[ty]) + (MOMENTUM * oldw[index]));
//__syncthreads();
if (ty == 0 && by ==0) {
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
__global__ void
bpnn_adjust_weights_cuda( float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0) {
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
64ea3d1cf20caa051acf96e61fd2019d542ac40c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void merge_kernel(Iterator1 first1, Iterator1 last1,
Iterator2 first2, Iterator2 last2,
Iterator3 result1,
Iterator4 result2)
{
*result2 = thrust::merge(thrust::seq, first1, last1, first2, last2, result1);
}
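/*
 * merge_kernel is launched with a single thread; thrust::merge with the thrust::seq
 * policy then performs the whole merge sequentially inside that thread. The returned
 * end iterator is written through result2 (a one-element device_vector of iterators) so
 * the host-side test can compare it against result.end().
 */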
void TestMergeDeviceSeq()
{
thrust::device_vector<int> a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
thrust::device_vector<int> ref(7);
ref[0] = 0;
ref[1] = 0;
ref[2] = 2;
ref[3] = 3;
ref[4] = 3;
ref[5] = 4;
ref[6] = 4;
thrust::device_vector<int> result(7);
thrust::device_vector<thrust::device_vector<int>::iterator> result_end(1);
hipLaunchKernelGGL(( merge_kernel), dim3(1),dim3(1), 0, 0, a.begin(), a.end(),
b.begin(), b.end(),
result.begin(),
result_end.begin());
thrust::device_vector<int>::iterator end = result_end[0];
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
}
DECLARE_UNITTEST(TestMergeDeviceSeq);
void TestMergeCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
Vector ref(7);
ref[0] = 0;
ref[1] = 0;
ref[2] = 2;
ref[3] = 3;
ref[4] = 3;
ref[5] = 4;
ref[6] = 4;
Vector result(7);
hipStream_t s;
hipStreamCreate(&s);
Iterator end = thrust::merge(thrust::hip::par(s),
a.begin(), a.end(),
b.begin(), b.end(),
result.begin());
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestMergeCudaStreams);
| 64ea3d1cf20caa051acf96e61fd2019d542ac40c.cu | #include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void merge_kernel(Iterator1 first1, Iterator1 last1,
Iterator2 first2, Iterator2 last2,
Iterator3 result1,
Iterator4 result2)
{
*result2 = thrust::merge(thrust::seq, first1, last1, first2, last2, result1);
}
void TestMergeDeviceSeq()
{
thrust::device_vector<int> a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
thrust::device_vector<int> ref(7);
ref[0] = 0;
ref[1] = 0;
ref[2] = 2;
ref[3] = 3;
ref[4] = 3;
ref[5] = 4;
ref[6] = 4;
thrust::device_vector<int> result(7);
thrust::device_vector<thrust::device_vector<int>::iterator> result_end(1);
merge_kernel<<<1,1>>>(a.begin(), a.end(),
b.begin(), b.end(),
result.begin(),
result_end.begin());
thrust::device_vector<int>::iterator end = result_end[0];
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
}
DECLARE_UNITTEST(TestMergeDeviceSeq);
void TestMergeCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
Vector ref(7);
ref[0] = 0;
ref[1] = 0;
ref[2] = 2;
ref[3] = 3;
ref[4] = 3;
ref[5] = 4;
ref[6] = 4;
Vector result(7);
cudaStream_t s;
cudaStreamCreate(&s);
Iterator end = thrust::merge(thrust::cuda::par(s),
a.begin(), a.end(),
b.begin(), b.end(),
result.begin());
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestMergeCudaStreams);
|
1c41892e843c795a7d7250c9d785a8af56d360e4.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// Matrix multiplication: C = A * B.
// Host code.
//
// This sample implements matrix multiplication as described in Chapter 3
// of the programming guide and uses the CUBLAS library to demonstrate
// the best performance.
// SOME PRECAUTIONS:
// IF WE WANT TO CALCULATE ROW-MAJOR MATRIX MULTIPLY C = A * B,
// WE JUST NEED TO CALL THE CUBLAS API IN REVERSE ORDER: cublasSgemm(B, A)!
// The reason is explained as follows:
// CUBLAS library uses column-major storage, but C/C++ use row-major storage.
// When passing the matrix pointer to CUBLAS, the memory layout alters from
// row-major to column-major, which is equivalent to an implicit transpose.
// In the case of row-major C/C++ matrix A, B, and a simple matrix multiplication
// C = A * B, we can't use the input order like hipblasSgemm(A, B) because of
// implicit transpose. The actual result of cublasSgemm(A, B) is A(T) * B(T).
// If col(A(T)) != row(B(T)), equal to row(A) != col(B), A(T) and B(T) are not
// multipliable. Moreover, even if A(T) and B(T) are multipliable, the result C
// is a column-based cublas matrix, which means C(T) in C/C++, we need extra
// transpose code to convert it to a row-based C/C++ matrix.
// To solve the problem, let's consider our desired result C, a row-major matrix.
// In cublas format, it is C(T) actually (because of the implicit transpose).
// C = A * B, so C(T) = (A * B)(T) = B(T) * A(T). The cublas matrices B(T) and A(T)
// happen to be the C/C++ matrices B and A (still because of the implicit transpose)!
// We don't need extra transpose code, we only need to alter the input order!
//
// CUBLAS provides high-performance matrix multiplication.
// See also:
// V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
// in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
// Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
//
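// Illustrative sketch of the call-order trick, assuming row-major A (m x k), B (k x n),
// C (m x n): pass B first and swap the m/n extents,
//   hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
//                n, m, k, &alpha, d_B, n, d_A, k, &beta, d_C, n);
// which matches the argument order matrixMultiply() uses below (with m = n = k there).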
// Utilities and system includes
#include <assert.h>
#include <helper_string.h> // helper for shared functions common to CUDA Samples
// CUDA runtime
#include <hip/hip_runtime.h>
#include <rocblas.h>
// CUDA and CUBLAS functions
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef min
#define min(a,b) ((a < b) ? a : b)
#endif
#ifndef max
#define max(a,b) ((a > b) ? a : b)
#endif
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
typedef struct _matrixSize // Optional Command-line multiplier for matrix sizes
{
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
} sMatrixSize;
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on CPU
//! C = A * B
//! @param C reference data, computed but preallocated
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param hA height of matrix A
//! @param wB width of matrix B
////////////////////////////////////////////////////////////////////////////////
// Allocates a matrix with random float entries.
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void initializeCUDA(int argc, char **argv, int &devID, int &iSizeMultiple, sMatrixSize &matrix_size)
{
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
hipError_t error;
devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
error = hipSetDevice(devID);
if (error != hipSuccess)
{
printf("hipSetDevice returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
}
// get number of SMs on this GPU
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
if (checkCmdLineFlag(argc, (const char **)argv, "sizemult"))
{
iSizeMultiple = getCmdLineArgumentInt(argc, (const char **)argv, "sizemult");
}
// iSizeMultiple = min(iSizeMultiple, 10);
// iSizeMultiple = max(iSizeMultiple, 1);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
// use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
matrix_size.uiWA = 1 * block_size * iSizeMultiple;
matrix_size.uiHA = 1 * block_size * iSizeMultiple;
matrix_size.uiWB = 1 * block_size * iSizeMultiple;
matrix_size.uiHB = 1 * block_size * iSizeMultiple;
matrix_size.uiWC = 1 * block_size * iSizeMultiple;
matrix_size.uiHC = 1 * block_size * iSizeMultiple;
printf("MatrixA(%u,%u), MatrixB(%u,%u), MatrixC(%u,%u)\n",
matrix_size.uiHA, matrix_size.uiWA,
matrix_size.uiHB, matrix_size.uiWB,
matrix_size.uiHC, matrix_size.uiWC);
if( matrix_size.uiWA != matrix_size.uiHB ||
matrix_size.uiHA != matrix_size.uiHC ||
matrix_size.uiWB != matrix_size.uiWC)
{
printf("ERROR: Matrix sizes do not match!\n");
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test matrix multiply using CUBLAS
////////////////////////////////////////////////////////////////////////////////
int matrixMultiply(int argc, char **argv, int devID, sMatrixSize &matrix_size)
{
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// use a larger block size for Fermi and above
// int block_size = (deviceProp.major < 2) ? 16 : 32;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = matrix_size.uiWA * matrix_size.uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
float *h_A2 = (float *)malloc(mem_size_A);
unsigned int size_B = matrix_size.uiWB * matrix_size.uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
float *h_B2 = (float *)malloc(mem_size_B);
// set seed for rand()
srand(2006);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_A2, size_A);
randomInit(h_B, size_B);
randomInit(h_B2, size_B);
// allocate device memory
float *d_A, *d_B, *d_C;
float *d_A2, *d_B2, *d_C2;
unsigned int size_C = matrix_size.uiWC * matrix_size.uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float *h_CUBLAS = (float *) malloc(mem_size_C);
float *h_CUBLAS2 = (float *) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void **) &d_A, mem_size_A));
checkCudaErrors(hipMalloc((void **) &d_A2, mem_size_A));
checkCudaErrors(hipMalloc((void **) &d_B, mem_size_B));
checkCudaErrors(hipMalloc((void **) &d_B2, mem_size_B));
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_A2, h_A2, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B2, h_B2, mem_size_B, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **) &d_C, mem_size_C));
checkCudaErrors(hipMalloc((void **) &d_C2, mem_size_C));
// setup execution parameters
// dim3 threads(block_size, block_size);
// dim3 grid(matrix_size.uiWC / threads.x, matrix_size.uiHC / threads.y);
hipStream_t stream1;
checkCudaErrors(hipStreamCreate(&stream1));
// checkCudaErrors(hipStreamCreate(&stream2));
// create and start timer
printf("Computing result using CUBLAS...");
// execute the kernel
int nIter = 100;
// tom
hipStream_t *tomStream = (hipStream_t *) malloc(1 * sizeof(hipStream_t));
checkCudaErrors(hipStreamCreate(&(tomStream[0])));
hipEvent_t *tomEvent = (hipEvent_t *) malloc(1 * sizeof(hipEvent_t));
checkCudaErrors(hipEventCreate(tomEvent));
float kernel_time = 1000; // time the kernel should run in ms
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
int nbytes = 1 * sizeof(clock_t); // number of data bytes
clock_t *a = 0; // pointer to the array data in host memory
checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
clock_t *d_a = 0; // pointers to data and init value in the device memory
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
// tom
// CUBLAS version 2.0
{
const float alpha = 1.0f;
const float beta = 0.0f;
hipblasHandle_t handle;
hipEvent_t start, stop;
checkCudaErrors(hipblasCreate(&handle));
//Perform warmup operation with cublas
checkCudaErrors(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, &beta, d_C, matrix_size.uiWB));
// Allocate CUDA events that we'll use for timing
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// tom
hipLaunchKernelGGL(( clock_block), dim3(1),dim3(1),0,tomStream[0], &d_a[0], time_clocks);
checkCudaErrors(hipEventRecord(tomEvent[0], tomStream[0]));
checkCudaErrors(hipblasSetStream(handle, stream1));
// tom
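        // The wait below is issued only on the first iteration: stream1, which carries the
        // hipblasSgemm calls, blocks until tomEvent[0] fires, i.e. until the clock_block
        // kernel (spinning for roughly kernel_time milliseconds) queued on tomStream[0] has
        // finished, so the whole GEMM loop starts behind that dummy kernel while staying
        // asynchronous with respect to the host.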
for (int j = 0; j < nIter; j++)
{
//note cublas is column primary!
//need to transpose the order
// checkCudaErrors(hipblasSetStream(handle, stream1));
if (j == 0) {
checkCudaErrors(hipStreamWaitEvent(stream1, tomEvent[0],0));
}
checkCudaErrors(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, &beta, d_C, matrix_size.uiWB));
// checkCudaErrors(hipblasSetStream(handle, stream2));
// if (j == 0) {
// checkCudaErrors(hipStreamWaitEvent(stream2, tomEvent[0],0));
// }
checkCudaErrors(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B2, matrix_size.uiWB, d_A2, matrix_size.uiWA, &beta, d_C2, matrix_size.uiWB));
}
printf("done.\n");
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
printf("Runnignt time = %.3f msec\n", msecPerMatrixMul);
checkCudaErrors(hipMemcpy(h_CUBLAS, d_C, mem_size_C, hipMemcpyDeviceToHost));
// Destroy the handle
checkCudaErrors(hipblasDestroy(handle));
}
// clean up memory
free(h_A);
free(h_B);
free(h_CUBLAS);
free(h_A2);
free(h_B2);
free(h_CUBLAS2);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipFree(d_A2));
checkCudaErrors(hipFree(d_B2));
checkCudaErrors(hipFree(d_C2));
if (true)
{
return EXIT_SUCCESS; // return value = 1
}
else
{
return EXIT_FAILURE; // return value = 0
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("[Matrix Multiply CUBLAS] - Starting...\n");
    int devID = 0, sizeMult = (argc > 1) ? atoi(argv[1]) : 1;  // default multiplier when no command-line argument is given
sMatrixSize matrix_size;
initializeCUDA(argc, argv, devID, sizeMult, matrix_size);
int matrix_result = matrixMultiply(argc, argv, devID, matrix_size);
return matrix_result;
}
| 1c41892e843c795a7d7250c9d785a8af56d360e4.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// Matrix multiplication: C = A * B.
// Host code.
//
// This sample implements matrix multiplication as described in Chapter 3
// of the programming guide and uses the CUBLAS library to demonstrate
// the best performance.
// SOME PRECAUTIONS:
// IF WE WANT TO CALCULATE ROW-MAJOR MATRIX MULTIPLY C = A * B,
// WE JUST NEED TO CALL THE CUBLAS API IN REVERSE ORDER: cublasSgemm(B, A)!
// The reason is explained as follows:
// CUBLAS library uses column-major storage, but C/C++ use row-major storage.
// When passing the matrix pointer to CUBLAS, the memory layout alters from
// row-major to column-major, which is equivalent to an implicit transpose.
// In the case of row-major C/C++ matrix A, B, and a simple matrix multiplication
// C = A * B, we can't use the input order like cublasSgemm(A, B) because of
// implicit transpose. The actual result of cublasSgemm(A, B) is A(T) * B(T).
// If col(A(T)) != row(B(T)), equal to row(A) != col(B), A(T) and B(T) are not
// multipliable. Moreover, even if A(T) and B(T) are multipliable, the result C
// is a column-based cublas matrix, which means C(T) in C/C++, we need extra
// transpose code to convert it to a row-based C/C++ matrix.
// To solve the problem, let's consider our desired result C, a row-major matrix.
// In cublas format, it is C(T) actually (because of the implicit transpose).
// C = A * B, so C(T) = (A * B)(T) = B(T) * A(T). The cublas matrices B(T) and A(T)
// happen to be the C/C++ matrices B and A (still because of the implicit transpose)!
// We don't need extra transpose code, we only need to alter the input order!
//
// CUBLAS provides high-performance matrix multiplication.
// See also:
// V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
// in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
// Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
//
// Utilities and system includes
#include <assert.h>
#include <helper_string.h> // helper for shared functions common to CUDA Samples
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
// CUDA and CUBLAS functions
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef min
#define min(a,b) ((a < b) ? a : b)
#endif
#ifndef max
#define max(a,b) ((a > b) ? a : b)
#endif
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
typedef struct _matrixSize // Optional Command-line multiplier for matrix sizes
{
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
} sMatrixSize;
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on CPU
//! C = A * B
//! @param C reference data, computed but preallocated
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param hA height of matrix A
//! @param wB width of matrix B
////////////////////////////////////////////////////////////////////////////////
// Allocates a matrix with random float entries.
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void initializeCUDA(int argc, char **argv, int &devID, int &iSizeMultiple, sMatrixSize &matrix_size)
{
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
cudaError_t error;
devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
error = cudaSetDevice(devID);
if (error != cudaSuccess)
{
printf("cudaSetDevice returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
}
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
if (checkCmdLineFlag(argc, (const char **)argv, "sizemult"))
{
iSizeMultiple = getCmdLineArgumentInt(argc, (const char **)argv, "sizemult");
}
// iSizeMultiple = min(iSizeMultiple, 10);
// iSizeMultiple = max(iSizeMultiple, 1);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
// use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
matrix_size.uiWA = 1 * block_size * iSizeMultiple;
matrix_size.uiHA = 1 * block_size * iSizeMultiple;
matrix_size.uiWB = 1 * block_size * iSizeMultiple;
matrix_size.uiHB = 1 * block_size * iSizeMultiple;
matrix_size.uiWC = 1 * block_size * iSizeMultiple;
matrix_size.uiHC = 1 * block_size * iSizeMultiple;
printf("MatrixA(%u,%u), MatrixB(%u,%u), MatrixC(%u,%u)\n",
matrix_size.uiHA, matrix_size.uiWA,
matrix_size.uiHB, matrix_size.uiWB,
matrix_size.uiHC, matrix_size.uiWC);
if( matrix_size.uiWA != matrix_size.uiHB ||
matrix_size.uiHA != matrix_size.uiHC ||
matrix_size.uiWB != matrix_size.uiWC)
{
printf("ERROR: Matrix sizes do not match!\n");
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test matrix multiply using CUBLAS
////////////////////////////////////////////////////////////////////////////////
int matrixMultiply(int argc, char **argv, int devID, sMatrixSize &matrix_size)
{
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// use a larger block size for Fermi and above
// int block_size = (deviceProp.major < 2) ? 16 : 32;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = matrix_size.uiWA * matrix_size.uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
float *h_A2 = (float *)malloc(mem_size_A);
unsigned int size_B = matrix_size.uiWB * matrix_size.uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
float *h_B2 = (float *)malloc(mem_size_B);
// set seed for rand()
srand(2006);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_A2, size_A);
randomInit(h_B, size_B);
randomInit(h_B2, size_B);
// allocate device memory
float *d_A, *d_B, *d_C;
float *d_A2, *d_B2, *d_C2;
unsigned int size_C = matrix_size.uiWC * matrix_size.uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float *h_CUBLAS = (float *) malloc(mem_size_C);
float *h_CUBLAS2 = (float *) malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void **) &d_A, mem_size_A));
checkCudaErrors(cudaMalloc((void **) &d_A2, mem_size_A));
checkCudaErrors(cudaMalloc((void **) &d_B, mem_size_B));
checkCudaErrors(cudaMalloc((void **) &d_B2, mem_size_B));
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_A2, h_A2, mem_size_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B2, h_B2, mem_size_B, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **) &d_C, mem_size_C));
checkCudaErrors(cudaMalloc((void **) &d_C2, mem_size_C));
// setup execution parameters
// dim3 threads(block_size, block_size);
// dim3 grid(matrix_size.uiWC / threads.x, matrix_size.uiHC / threads.y);
cudaStream_t stream1;
checkCudaErrors(cudaStreamCreate(&stream1));
// checkCudaErrors(cudaStreamCreate(&stream2));
// create and start timer
printf("Computing result using CUBLAS...");
// execute the kernel
int nIter = 100;
// tom
cudaStream_t *tomStream = (cudaStream_t *) malloc(1 * sizeof(cudaStream_t));
checkCudaErrors(cudaStreamCreate(&(tomStream[0])));
cudaEvent_t *tomEvent = (cudaEvent_t *) malloc(1 * sizeof(cudaEvent_t));
checkCudaErrors(cudaEventCreate(tomEvent));
float kernel_time = 1000; // time the kernel should run in ms
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
int nbytes = 1 * sizeof(clock_t); // number of data bytes
clock_t *a = 0; // pointer to the array data in host memory
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
clock_t *d_a = 0; // pointers to data and init value in the device memory
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
// tom
// CUBLAS version 2.0
{
const float alpha = 1.0f;
const float beta = 0.0f;
cublasHandle_t handle;
cudaEvent_t start, stop;
checkCudaErrors(cublasCreate(&handle));
//Perform warmup operation with cublas
checkCudaErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, &beta, d_C, matrix_size.uiWB));
// Allocate CUDA events that we'll use for timing
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// tom
clock_block<<<1,1,0,tomStream[0]>>>(&d_a[0], time_clocks);
checkCudaErrors(cudaEventRecord(tomEvent[0], tomStream[0]));
checkCudaErrors(cublasSetStream(handle, stream1));
// tom
for (int j = 0; j < nIter; j++)
{
//note cublas is column primary!
//need to transpose the order
// checkCudaErrors(cublasSetStream(handle, stream1));
if (j == 0) {
checkCudaErrors(cudaStreamWaitEvent(stream1, tomEvent[0],0));
}
checkCudaErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, &beta, d_C, matrix_size.uiWB));
// checkCudaErrors(cublasSetStream(handle, stream2));
// if (j == 0) {
// checkCudaErrors(cudaStreamWaitEvent(stream2, tomEvent[0],0));
// }
checkCudaErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B2, matrix_size.uiWB, d_A2, matrix_size.uiWA, &beta, d_C2, matrix_size.uiWB));
}
printf("done.\n");
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
printf("Runnignt time = %.3f msec\n", msecPerMatrixMul);
checkCudaErrors(cudaMemcpy(h_CUBLAS, d_C, mem_size_C, cudaMemcpyDeviceToHost));
// Destroy the handle
checkCudaErrors(cublasDestroy(handle));
}
// clean up memory
free(h_A);
free(h_B);
free(h_CUBLAS);
free(h_A2);
free(h_B2);
free(h_CUBLAS2);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaFree(d_A2));
checkCudaErrors(cudaFree(d_B2));
checkCudaErrors(cudaFree(d_C2));
if (true)
{
return EXIT_SUCCESS; // return value = 1
}
else
{
return EXIT_FAILURE; // return value = 0
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("[Matrix Multiply CUBLAS] - Starting...\n");
    int devID = 0, sizeMult = (argc > 1) ? atoi(argv[1]) : 1;  // default multiplier when no command-line argument is given
sMatrixSize matrix_size;
initializeCUDA(argc, argv, devID, sizeMult, matrix_size);
int matrix_result = matrixMultiply(argc, argv, devID, matrix_size);
return matrix_result;
}
|
73a1383a1fc6ebcfe668814c676e2567a22a2806.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
// Number of threads
#define BLOCK_SIZE 16
#define GRID_SIZE 256
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
// nCentroids and size on device
__constant__ int dev_nCentroids;
__constant__ int dev_size;
// global variables
int CLUSTER_BYTES = 0; // nCentroids * sizeof(int)
int IMAGE_BYTES = 0; // width * height * sizeof(int)
//R,G,B Centroid's triple on device
__constant__ int dev_RedCentroid[20];
__constant__ int dev_GreenCentroid[20];
__constant__ int dev_BlueCentroid[20];
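// Forgy-style initialization: each centroid is seeded with the RGB triple of a pixel
// picked uniformly at random. rand() does not appear to be seeded anywhere in this file,
// so the same starting centroids are chosen on every run.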
void initialise_centroids(int nCentroids, int* redCentroid, int* greenCentroid, int* blueCentroid, int r[], int g[], int b[], int size) {
int i;
for(i=0;i<nCentroids;++i)
{
int index = rand()%size;
redCentroid[i] = r[index];
blueCentroid[i] = b[index];
greenCentroid[i] = g[index];
}
}
bool loadRawImage(char* filename, int* r, int* g, int* b, int size) {
FILE *imageFile;
imageFile = fopen(filename, "r");
if (imageFile == NULL) {
return false;
} else {
for (int i = 0; i < size; i++) {
r[i] = fgetc(imageFile);
g[i] = fgetc(imageFile);
b[i] = fgetc(imageFile);
}
fclose(imageFile);
/*for(int j = 0; j < h * w; j++) {
printf("%d, %d, %d ", r[j], g[j], b[j]);
}*/
return true;
}
}
bool writeRawImage(char* filename, int* labelArray, int* redCentroid, int* greenCentroid, int* blueCentroid, int size){
FILE *imageFile;
imageFile = fopen(filename, "wb");
if(imageFile == NULL) {
return false;
} else {
for (int i = 0; i < size; i++) {
fputc((char) redCentroid[labelArray[i]], imageFile);
fputc((char) greenCentroid[labelArray[i]], imageFile);
fputc((char) blueCentroid[labelArray[i]], imageFile);
}
fclose(imageFile);
return true;
}
}
//Clears arrays before each kernel getClusterLabel iteration
__global__ void clearArrays(int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter, int* dev_tempRedCentroid, int* dev_tempGreenCentroid, int* dev_tempBlueCentroid ) {
// 1 block, 16x16 threads
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < dev_nCentroids) {
// nCentroids long
dev_sumRed[threadID] = 0;
dev_sumGreen[threadID] = 0;
dev_sumBlue[threadID] = 0;
dev_pixelClusterCounter[threadID] = 0;
dev_tempRedCentroid[threadID] = 0;
dev_tempGreenCentroid[threadID] = 0;
dev_tempBlueCentroid[threadID] = 0;
}
}// end clearArrays
/*
* Clear label array before each kernel getClusterLabel iteration
*/
__global__ void clearLabelArray(int *dev_labelArray){
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
//int threadID = (blockIdx.y*gridDim.x + blockIdx.x)*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// labelArray is "size" long
if(threadID < dev_size) {
dev_labelArray[threadID] = 0;
}
}// end clearLabelArray
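/*
 * Assignment step of k-means: one thread per pixel finds the nearest centroid by
 * Euclidean distance in RGB space and stores its index in dev_labelArray. The initial
 * 'min' of 500.0 is a safe upper bound because the largest possible RGB distance is
 * sqrt(3 * 255^2), roughly 441.7.
 */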
__global__ void getClusterLabel(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_labelArray) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
//int threadID = (blockIdx.y*gridDim.x + blockIdx.x)*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
//default min value of distance
float min = 500.0, value;
//will be label
int index = 0;
if(threadID < dev_size) {
// Finding the nearest centroid to current triple identified by threadID thread
for(int i = 0; i < dev_nCentroids; i++) {
// Performing Euclidean distance, Saving current value
value = sqrtf(powf((dev_Red[threadID]-dev_RedCentroid[i]),2.0) + powf((dev_Green[threadID]-dev_GreenCentroid[i]),2.0) + powf((dev_Blue[threadID]-dev_BlueCentroid[i]),2.0));
if(value < min){
// saving new nearest centroid
min = value;
// Updating his index
index = i;
}
}// end for
// Writing to global memory the index of the nearest centroid
// for dev_Red[threadID], dev_Green[threadID], dev_Blue[threadID] pixel triple
dev_labelArray[threadID] = index;
}// end if
}// end getClusterLabel
/*
* Summing Red, Green, Blue values per cluster
* Counting how many pixels there are in each cluster
*
*/
__global__ void sumCluster(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue,int *dev_labelArray,int *dev_pixelClusterCounter) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
//int threadID = (blockIdx.y*gridDim.x + blockIdx.x)*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
if(threadID < dev_size) {
int currentLabelArray = dev_labelArray[threadID];
int currentRed = dev_Red[threadID];
int currentGreen = dev_Green[threadID];
int currentBlue = dev_Blue[threadID];
// Writing to global memory needs a serialization. Many threads are writing into the same few locations
atomicAdd(&dev_sumRed[currentLabelArray], currentRed);
atomicAdd(&dev_sumGreen[currentLabelArray], currentGreen);
atomicAdd(&dev_sumBlue[currentLabelArray], currentBlue);
atomicAdd(&dev_pixelClusterCounter[currentLabelArray], 1);
}
}// end sumCluster
/*
* Calculates the new R,G,B values of the centroids dividing the sum of color (for each channel) by the number of pixels in that cluster
* New values are stored in global memory since the current R,G,B values of the centroids are in read-only constant memory.
*/
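/*
 * Note: the division below assumes every cluster still contains at least one pixel; an
 * empty cluster would make currentPixelCounter zero and the integer division undefined.
 * Keeping the previous centroid when the counter is zero would make this step more robust.
 */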
__global__ void updateCentroids(int *dev_tempRedCentroid, int *dev_tempGreenCentroid, int *dev_tempBlueCentroid,int* dev_sumRed, int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter,int *dev_flag) {
// 1 block , 16*16 threads
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < dev_nCentroids) {
int currentPixelCounter = dev_pixelClusterCounter[threadID];
int sumRed = dev_sumRed[threadID];
int sumGreen = dev_sumGreen[threadID];
int sumBlue = dev_sumBlue[threadID];
//new RGB Centroids' values written in global memory
dev_tempRedCentroid[threadID] = (int)(sumRed/currentPixelCounter);
dev_tempGreenCentroid[threadID] = (int)(sumGreen/currentPixelCounter);
dev_tempBlueCentroid[threadID] = (int)(sumBlue/currentPixelCounter);
if(dev_tempGreenCentroid[threadID]!=dev_GreenCentroid[threadID] || dev_tempRedCentroid[threadID]!=dev_RedCentroid[threadID] || dev_tempBlueCentroid[threadID]!=dev_BlueCentroid[threadID])
*dev_flag=1;
}
}// end updateCentroids
/*******************************************************************/
int main(int argc, char *argv[]) {
hipSetDevice(0);
hipDeviceSynchronize();
hipDeviceSynchronize();
char *inputFile, *outputFile;
int *r, *g, *b, *redCentroid, *greenCentroid, *blueCentroid;
int *dev_Red, *dev_Green, *dev_Blue, *dev_tempRedCentroid, *dev_tempGreenCentroid, *dev_tempBlueCentroid;
int *labelArray, *dev_labelArray;
int width, height, nCentroids, nIterations,size;
//int IMAGE_BYTES, CLUSTER_BYTES;
int *pixelClusterCounter, *dev_pixelClusterCounter;
int *sumRed, *sumGreen, *sumBlue;
int flag = 0;
int *dev_sumRed, *dev_sumGreen, *dev_sumBlue;
int *dev_flag;
inputFile = argv[1];
outputFile = argv[2];
width = atoi(argv[3]);
height = atoi(argv[4]);
nCentroids = atoi(argv[5]);
nIterations = atoi(argv[6]);
// Setting image size in bytes
IMAGE_BYTES = width * height * sizeof(int);
CLUSTER_BYTES = nCentroids * sizeof(int);
size = width * height;
printf("Image: %s\n",inputFile);
printf("Width: %d, Height: %d\n", width, height);
printf("#Clusters: %d, #Iterations: %d\n", nCentroids, nIterations);
r = (int*)(malloc(IMAGE_BYTES));
g = (int*)(malloc(IMAGE_BYTES));
b = (int*)(malloc(IMAGE_BYTES));
redCentroid = (int*)(malloc(CLUSTER_BYTES));
greenCentroid = (int*)(malloc(CLUSTER_BYTES));
blueCentroid = (int*)(malloc(CLUSTER_BYTES));
labelArray = (int*)(malloc(IMAGE_BYTES)); //stores the cluster number for each pixel
sumRed = (int*)(malloc(CLUSTER_BYTES));
sumGreen = (int*)(malloc(CLUSTER_BYTES));
sumBlue = (int*)(malloc(CLUSTER_BYTES));
pixelClusterCounter = (int*)(malloc(CLUSTER_BYTES));
// Loading image in r, g, b arrays
printf("Image loading...\n");
if (loadRawImage(inputFile, r, g, b, size)) {
printf("Image loaded!\n");
} else {
printf("NOT loaded!\n");
return -1;
}
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Setting initial centroids
printf("Initial Centroids: \n");
initialise_centroids(nCentroids, redCentroid, greenCentroid, blueCentroid,r,g,b,size);
printf("\n");
if(IMAGE_BYTES == 0 || CLUSTER_BYTES == 0) {
return -1;
}
// allocate memory on GPU
CUDA_CALL(hipMalloc((void**) &dev_Red, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_Green, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_Blue, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_tempRedCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_tempGreenCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_tempBlueCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_labelArray, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_sumRed, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_sumGreen, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_sumBlue, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_pixelClusterCounter, CLUSTER_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_flag, sizeof(int)));
// copy host CPU memory to GPU
CUDA_CALL(hipMemcpy(dev_Red, r, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_Green, g, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_Blue, b, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_tempRedCentroid, redCentroid,CLUSTER_BYTES,hipMemcpyHostToDevice ));
CUDA_CALL(hipMemcpy(dev_tempGreenCentroid, greenCentroid,CLUSTER_BYTES,hipMemcpyHostToDevice ));
CUDA_CALL(hipMemcpy(dev_tempBlueCentroid, blueCentroid,CLUSTER_BYTES,hipMemcpyHostToDevice ));
CUDA_CALL(hipMemcpy(dev_labelArray, labelArray, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_flag,&flag,sizeof(int),hipMemcpyHostToDevice));
//CUDA_CALL(hipMemcpy(dev_sumRed, sumRed, CLUSTER_BYTES, hipMemcpyHostToDevice));
//CUDA_CALL(hipMemcpy(dev_sumGreen, sumGreen, CLUSTER_BYTES, hipMemcpyHostToDevice));
//CUDA_CALL(hipMemcpy(dev_sumBlue, sumBlue, CLUSTER_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_pixelClusterCounter, pixelClusterCounter, CLUSTER_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(dev_RedCentroid, redCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_GreenCentroid, greenCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_BlueCentroid, blueCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_nCentroids,&nCentroids, sizeof(int)));
CUDA_CALL(hipMemcpyToSymbol(dev_size, &size, sizeof(int)));
// Clearing centroids on host
for(int i = 0; i < nCentroids; i++) {
redCentroid[i] = 0;
greenCentroid[i] = 0;
blueCentroid[i] = 0;
}
// Defining grid size
int BLOCK_X, BLOCK_Y;
	BLOCK_X = (width + BLOCK_SIZE - 1) / BLOCK_SIZE;   // integer round-up so every pixel column is covered by the grid
	BLOCK_Y = (height + BLOCK_SIZE - 1) / BLOCK_SIZE;  // integer round-up so every pixel row is covered by the grid
if(BLOCK_X > GRID_SIZE)
BLOCK_X = GRID_SIZE;
if(BLOCK_Y > GRID_SIZE)
BLOCK_Y = GRID_SIZE;
dim3 dimGRID(BLOCK_X,BLOCK_Y);
dim3 dimBLOCK(BLOCK_SIZE,BLOCK_SIZE);
//Starting timer
hipEventRecord(start, 0);
printf("Launching K-Means Kernels.. \n");
//Iteration of kmeans algorithm
int num_iterations;
for(int i = 0; i < nIterations; i++) {
num_iterations = i;
flag=0;
CUDA_CALL(hipMemcpy(dev_flag,&flag,sizeof(int),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( clearArrays), dim3(1), dim3(dimBLOCK), 0, 0, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter, dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid);
hipLaunchKernelGGL(( clearLabelArray), dim3(dimGRID), dim3(dimBLOCK), 0, 0, dev_labelArray);
hipLaunchKernelGGL(( getClusterLabel), dim3(dimGRID), dim3(dimBLOCK) , 0, 0, dev_Red, dev_Green, dev_Blue,dev_labelArray);
hipLaunchKernelGGL(( sumCluster), dim3(dimGRID), dim3(dimBLOCK), 0, 0, dev_Red, dev_Green, dev_Blue, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_labelArray,dev_pixelClusterCounter);
hipLaunchKernelGGL(( updateCentroids), dim3(1),dim3(dimBLOCK) , 0, 0, dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter,dev_flag);
CUDA_CALL(hipMemcpy(redCentroid, dev_tempRedCentroid, CLUSTER_BYTES,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(greenCentroid, dev_tempGreenCentroid, CLUSTER_BYTES,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(blueCentroid, dev_tempBlueCentroid, CLUSTER_BYTES,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(&flag, dev_flag,sizeof(int),hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpyToSymbol(dev_RedCentroid, redCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_GreenCentroid, greenCentroid, CLUSTER_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_BlueCentroid, blueCentroid, CLUSTER_BYTES));
if(flag==0)
break;
}
hipEventRecord(stop, 0);
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
CUDA_CALL(hipMemcpy(labelArray, dev_labelArray, IMAGE_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(sumRed, dev_sumRed, CLUSTER_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(sumGreen, dev_sumGreen, CLUSTER_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(sumBlue, dev_sumBlue, CLUSTER_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(pixelClusterCounter, dev_pixelClusterCounter, CLUSTER_BYTES, hipMemcpyDeviceToHost));
printf("Kmeans code ran in: %f secs.\n", elapsed/1000.0);
printf("Converged in %d iterations.\n",num_iterations);
printf("\n");
// labelArray DEBUG
int counter = 0;
printf("Label Array:\n");
for(int i = 0; i < (size); i++) {
//printf("%d\n", labelArray[i]);
counter++;
}
printf("printing counter %d\n", counter);
counter = 0;
printf("Sum Arrays:\n");
for(int j = 0; j < nCentroids; j++) {
printf("r: %u g: %u b: %u \n", sumRed[j], sumGreen[j], sumBlue[j]);
counter++;
}
printf("\n");
printf("Pixels per centroids:\n");
for(int k = 0; k < nCentroids; k++){
printf("%d centroid: %d pixels\n", k, pixelClusterCounter[k]);
}
printf("\n");
printf("New centroids:\n");
for(int i = 0; i < nCentroids; i++) {
printf("%d, %d, %d \n", redCentroid[i], greenCentroid[i], blueCentroid[i]);
}
// writing...
printf("Image writing...\n");
if (writeRawImage(outputFile,labelArray, redCentroid, greenCentroid, blueCentroid, size)) {
printf("Image written!\n");
} else {
printf("NOT written!\n");
return -1;
}
free(r);
free(g);
free(b);
free(redCentroid);
free(greenCentroid);
free(blueCentroid);
free(labelArray);
free(sumRed);
free(sumGreen);
free(sumBlue);
free(pixelClusterCounter);
CUDA_CALL(hipFree(dev_Red));
CUDA_CALL(hipFree(dev_Green));
CUDA_CALL(hipFree(dev_Blue));
CUDA_CALL(hipFree(dev_tempRedCentroid));
CUDA_CALL(hipFree(dev_tempGreenCentroid));
CUDA_CALL(hipFree(dev_tempBlueCentroid));
CUDA_CALL(hipFree(dev_labelArray));
CUDA_CALL(hipFree(dev_sumRed));
CUDA_CALL(hipFree(dev_sumGreen));
CUDA_CALL(hipFree(dev_sumBlue));
CUDA_CALL(hipFree(dev_pixelClusterCounter));
printf("That's the end.\n");
return 0;
}
| 73a1383a1fc6ebcfe668814c676e2567a22a2806.cu |
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
// Number of threads
#define BLOCK_SIZE 16
#define GRID_SIZE 256
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
// nCentroids and size on device
__constant__ int dev_nCentroids;
__constant__ int dev_size;
// global variables
int CLUSTER_BYTES = 0; // nCentroids * sizeof(int)
int IMAGE_BYTES = 0; // width * height * sizeof(int)
//R,G,B Centroid's triple on device
__constant__ int dev_RedCentroid[20];
__constant__ int dev_GreenCentroid[20];
__constant__ int dev_BlueCentroid[20];
void initialise_centroids(int nCentroids, int* redCentroid, int* greenCentroid, int* blueCentroid, int r[], int g[], int b[], int size) {
int i;
for(i=0;i<nCentroids;++i)
{
int index = rand()%size;
redCentroid[i] = r[index];
blueCentroid[i] = b[index];
greenCentroid[i] = g[index];
}
}
bool loadRawImage(char* filename, int* r, int* g, int* b, int size) {
FILE *imageFile;
imageFile = fopen(filename, "r");
if (imageFile == NULL) {
return false;
} else {
for (int i = 0; i < size; i++) {
r[i] = fgetc(imageFile);
g[i] = fgetc(imageFile);
b[i] = fgetc(imageFile);
}
fclose(imageFile);
/*for(int j = 0; j < h * w; j++) {
printf("%d, %d, %d ", r[j], g[j], b[j]);
}*/
return true;
}
}
bool writeRawImage(char* filename, int* labelArray, int* redCentroid, int* greenCentroid, int* blueCentroid, int size){
FILE *imageFile;
imageFile = fopen(filename, "wb");
if(imageFile == NULL) {
return false;
} else {
for (int i = 0; i < size; i++) {
fputc((char) redCentroid[labelArray[i]], imageFile);
fputc((char) greenCentroid[labelArray[i]], imageFile);
fputc((char) blueCentroid[labelArray[i]], imageFile);
}
fclose(imageFile);
return true;
}
}
//Clears arrays before each kernel getClusterLabel iteration
__global__ void clearArrays(int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter, int* dev_tempRedCentroid, int* dev_tempGreenCentroid, int* dev_tempBlueCentroid ) {
// 1 block, 16x16 threads
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < dev_nCentroids) {
// nCentroids long
dev_sumRed[threadID] = 0;
dev_sumGreen[threadID] = 0;
dev_sumBlue[threadID] = 0;
dev_pixelClusterCounter[threadID] = 0;
dev_tempRedCentroid[threadID] = 0;
dev_tempGreenCentroid[threadID] = 0;
dev_tempBlueCentroid[threadID] = 0;
}
}// end clearArrays
/*
* Clear label array before each kernel getClusterLabel iteration
*/
__global__ void clearLabelArray(int *dev_labelArray){
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
//int threadID = (blockIdx.y*gridDim.x + blockIdx.x)*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// labelArray is "size" long
if(threadID < dev_size) {
dev_labelArray[threadID] = 0;
}
}// end clearLabelArray
__global__ void getClusterLabel(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_labelArray) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
//int threadID = (blockIdx.y*gridDim.x + blockIdx.x)*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
//default min value of distance
float min = 500.0, value;
//will be label
int index = 0;
if(threadID < dev_size) {
// Finding the nearest centroid to current triple identified by threadID thread
for(int i = 0; i < dev_nCentroids; i++) {
// Performing Euclidean distance, Saving current value
value = sqrtf(powf((dev_Red[threadID]-dev_RedCentroid[i]),2.0) + powf((dev_Green[threadID]-dev_GreenCentroid[i]),2.0) + powf((dev_Blue[threadID]-dev_BlueCentroid[i]),2.0));
if(value < min){
// saving new nearest centroid
min = value;
// Updating his index
index = i;
}
}// end for
// Writing to global memory the index of the nearest centroid
// for dev_Red[threadID], dev_Green[threadID], dev_Blue[threadID] pixel triple
dev_labelArray[threadID] = index;
}// end if
}// end getClusterLabel
/*
* Summing Red, Green, Blue values per cluster
* Counting how many pixels there are in each cluster
*
*/
__global__ void sumCluster(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue,int *dev_labelArray,int *dev_pixelClusterCounter) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
//int threadID = (blockIdx.y*gridDim.x + blockIdx.x)*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
if(threadID < dev_size) {
int currentLabelArray = dev_labelArray[threadID];
int currentRed = dev_Red[threadID];
int currentGreen = dev_Green[threadID];
int currentBlue = dev_Blue[threadID];
// Writing to global memory needs a serialization. Many threads are writing into the same few locations
atomicAdd(&dev_sumRed[currentLabelArray], currentRed);
atomicAdd(&dev_sumGreen[currentLabelArray], currentGreen);
atomicAdd(&dev_sumBlue[currentLabelArray], currentBlue);
atomicAdd(&dev_pixelClusterCounter[currentLabelArray], 1);
}
}// end sumCluster
/*
* Calculates the new R,G,B values of the centroids dividing the sum of color (for each channel) by the number of pixels in that cluster
* New values are stored in global memory since the current R,G,B values of the centroids are in read-only constant memory.
*/
__global__ void updateCentroids(int *dev_tempRedCentroid, int *dev_tempGreenCentroid, int *dev_tempBlueCentroid,int* dev_sumRed, int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter,int *dev_flag) {
// 1 block , 16*16 threads
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < dev_nCentroids) {
int currentPixelCounter = dev_pixelClusterCounter[threadID];
int sumRed = dev_sumRed[threadID];
int sumGreen = dev_sumGreen[threadID];
int sumBlue = dev_sumBlue[threadID];
//new RGB Centroids' values written in global memory
dev_tempRedCentroid[threadID] = (int)(sumRed/currentPixelCounter);
dev_tempGreenCentroid[threadID] = (int)(sumGreen/currentPixelCounter);
dev_tempBlueCentroid[threadID] = (int)(sumBlue/currentPixelCounter);
if(dev_tempGreenCentroid[threadID]!=dev_GreenCentroid[threadID] || dev_tempRedCentroid[threadID]!=dev_RedCentroid[threadID] || dev_tempBlueCentroid[threadID]!=dev_BlueCentroid[threadID])
*dev_flag=1;
}
}// end updateCentroids
/*******************************************************************/
int main(int argc, char *argv[]) {
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaThreadSynchronize();
char *inputFile, *outputFile;
int *r, *g, *b, *redCentroid, *greenCentroid, *blueCentroid;
int *dev_Red, *dev_Green, *dev_Blue, *dev_tempRedCentroid, *dev_tempGreenCentroid, *dev_tempBlueCentroid;
int *labelArray, *dev_labelArray;
int width, height, nCentroids, nIterations,size;
//int IMAGE_BYTES, CLUSTER_BYTES;
int *pixelClusterCounter, *dev_pixelClusterCounter;
int *sumRed, *sumGreen, *sumBlue;
int flag = 0;
int *dev_sumRed, *dev_sumGreen, *dev_sumBlue;
int *dev_flag;
inputFile = argv[1];
outputFile = argv[2];
width = atoi(argv[3]);
height = atoi(argv[4]);
nCentroids = atoi(argv[5]);
nIterations = atoi(argv[6]);
// Setting image size in bytes
IMAGE_BYTES = width * height * sizeof(int);
CLUSTER_BYTES = nCentroids * sizeof(int);
size = width * height;
printf("Image: %s\n",inputFile);
printf("Width: %d, Height: %d\n", width, height);
printf("#Clusters: %d, #Iterations: %d\n", nCentroids, nIterations);
r = (int*)(malloc(IMAGE_BYTES));
g = (int*)(malloc(IMAGE_BYTES));
b = (int*)(malloc(IMAGE_BYTES));
redCentroid = (int*)(malloc(CLUSTER_BYTES));
greenCentroid = (int*)(malloc(CLUSTER_BYTES));
blueCentroid = (int*)(malloc(CLUSTER_BYTES));
labelArray = (int*)(malloc(IMAGE_BYTES)); //stores the cluster number for each pixel
sumRed = (int*)(malloc(CLUSTER_BYTES));
sumGreen = (int*)(malloc(CLUSTER_BYTES));
sumBlue = (int*)(malloc(CLUSTER_BYTES));
pixelClusterCounter = (int*)(malloc(CLUSTER_BYTES));
// Loading image in r, g, b arrays
printf("Image loading...\n");
if (loadRawImage(inputFile, r, g, b, size)) {
printf("Image loaded!\n");
} else {
printf("NOT loaded!\n");
return -1;
}
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Setting initial centroids
printf("Initial Centroids: \n");
initialise_centroids(nCentroids, redCentroid, greenCentroid, blueCentroid,r,g,b,size);
printf("\n");
if(IMAGE_BYTES == 0 || CLUSTER_BYTES == 0) {
return -1;
}
// allocate memory on GPU
CUDA_CALL(cudaMalloc((void**) &dev_Red, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_Green, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_Blue, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_tempRedCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_tempGreenCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_tempBlueCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_labelArray, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_sumRed, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_sumGreen, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_sumBlue, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_pixelClusterCounter, CLUSTER_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_flag, sizeof(int)));
// copy host CPU memory to GPU
CUDA_CALL(cudaMemcpy(dev_Red, r, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_Green, g, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_Blue, b, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_tempRedCentroid, redCentroid,CLUSTER_BYTES,cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy(dev_tempGreenCentroid, greenCentroid,CLUSTER_BYTES,cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy(dev_tempBlueCentroid, blueCentroid,CLUSTER_BYTES,cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy(dev_labelArray, labelArray, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_flag,&flag,sizeof(int),cudaMemcpyHostToDevice));
//CUDA_CALL(cudaMemcpy(dev_sumRed, sumRed, CLUSTER_BYTES, cudaMemcpyHostToDevice));
//CUDA_CALL(cudaMemcpy(dev_sumGreen, sumGreen, CLUSTER_BYTES, cudaMemcpyHostToDevice));
//CUDA_CALL(cudaMemcpy(dev_sumBlue, sumBlue, CLUSTER_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_pixelClusterCounter, pixelClusterCounter, CLUSTER_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(dev_RedCentroid, redCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_GreenCentroid, greenCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_BlueCentroid, blueCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_nCentroids,&nCentroids, sizeof(int)));
CUDA_CALL(cudaMemcpyToSymbol(dev_size, &size, sizeof(int)));
// Clearing centroids on host
for(int i = 0; i < nCentroids; i++) {
redCentroid[i] = 0;
greenCentroid[i] = 0;
blueCentroid[i] = 0;
}
// Defining grid size
int BLOCK_X, BLOCK_Y;
BLOCK_X = ceil(width/BLOCK_SIZE);
BLOCK_Y = ceil(height/BLOCK_SIZE);
if(BLOCK_X > GRID_SIZE)
BLOCK_X = GRID_SIZE;
if(BLOCK_Y > GRID_SIZE)
BLOCK_Y = GRID_SIZE;
dim3 dimGRID(BLOCK_X,BLOCK_Y);
dim3 dimBLOCK(BLOCK_SIZE,BLOCK_SIZE);
//Starting timer
cudaEventRecord(start, 0);
printf("Launching K-Means Kernels.. \n");
//Iteration of kmeans algorithm
int num_iterations;
for(int i = 0; i < nIterations; i++) {
num_iterations = i;
flag=0;
CUDA_CALL(cudaMemcpy(dev_flag,&flag,sizeof(int),cudaMemcpyHostToDevice));
clearArrays<<<1, dimBLOCK>>>(dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter, dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid);
clearLabelArray<<<dimGRID, dimBLOCK>>>(dev_labelArray);
getClusterLabel<<< dimGRID, dimBLOCK >>> (dev_Red, dev_Green, dev_Blue,dev_labelArray);
sumCluster<<<dimGRID, dimBLOCK>>> (dev_Red, dev_Green, dev_Blue, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_labelArray,dev_pixelClusterCounter);
updateCentroids<<<1,dimBLOCK >>>(dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter,dev_flag);
CUDA_CALL(cudaMemcpy(redCentroid, dev_tempRedCentroid, CLUSTER_BYTES,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(greenCentroid, dev_tempGreenCentroid, CLUSTER_BYTES,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(blueCentroid, dev_tempBlueCentroid, CLUSTER_BYTES,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(&flag, dev_flag,sizeof(int),cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpyToSymbol(dev_RedCentroid, redCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_GreenCentroid, greenCentroid, CLUSTER_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_BlueCentroid, blueCentroid, CLUSTER_BYTES));
if(flag==0)
break;
}
cudaEventRecord(stop, 0);
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
CUDA_CALL(cudaMemcpy(labelArray, dev_labelArray, IMAGE_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(sumRed, dev_sumRed, CLUSTER_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(sumGreen, dev_sumGreen, CLUSTER_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(sumBlue, dev_sumBlue, CLUSTER_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(pixelClusterCounter, dev_pixelClusterCounter, CLUSTER_BYTES, cudaMemcpyDeviceToHost));
printf("Kmeans code ran in: %f secs.\n", elapsed/1000.0);
printf("Converged in %d iterations.\n",num_iterations);
printf("\n");
// labelArray DEBUG
int counter = 0;
printf("Label Array:\n");
for(int i = 0; i < (size); i++) {
//printf("%d\n", labelArray[i]);
counter++;
}
printf("printing counter %d\n", counter);
counter = 0;
printf("Sum Arrays:\n");
for(int j = 0; j < nCentroids; j++) {
printf("r: %u g: %u b: %u \n", sumRed[j], sumGreen[j], sumBlue[j]);
counter++;
}
printf("\n");
printf("Pixels per centroids:\n");
for(int k = 0; k < nCentroids; k++){
printf("%d centroid: %d pixels\n", k, pixelClusterCounter[k]);
}
printf("\n");
printf("New centroids:\n");
for(int i = 0; i < nCentroids; i++) {
printf("%d, %d, %d \n", redCentroid[i], greenCentroid[i], blueCentroid[i]);
}
// writing...
printf("Image writing...\n");
if (writeRawImage(outputFile,labelArray, redCentroid, greenCentroid, blueCentroid, size)) {
printf("Image written!\n");
} else {
printf("NOT written!\n");
return -1;
}
free(r);
free(g);
free(b);
free(redCentroid);
free(greenCentroid);
free(blueCentroid);
free(labelArray);
free(sumRed);
free(sumGreen);
free(sumBlue);
free(pixelClusterCounter);
CUDA_CALL(cudaFree(dev_Red));
CUDA_CALL(cudaFree(dev_Green));
CUDA_CALL(cudaFree(dev_Blue));
CUDA_CALL(cudaFree(dev_tempRedCentroid));
CUDA_CALL(cudaFree(dev_tempGreenCentroid));
CUDA_CALL(cudaFree(dev_tempBlueCentroid));
CUDA_CALL(cudaFree(dev_labelArray));
CUDA_CALL(cudaFree(dev_sumRed));
CUDA_CALL(cudaFree(dev_sumGreen));
CUDA_CALL(cudaFree(dev_sumBlue));
CUDA_CALL(cudaFree(dev_pixelClusterCounter));
printf("That's the end.\n");
return 0;
}
|
ad34774c9ab01174f1ce978e0bb4ee055722c7cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// function for checking the CUDA runtime API results.
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
printf("Error: %s : %d, ", __FILE__, __LINE__);
printf("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
exit(1);
}
#endif
}
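// Fill an array with pseudo-random floats in [0, 25.5].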
void initialData(float *ip, const int size)
{
int i;
for (i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
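// Reference CPU implementation: element-wise sum of two nx-by-ny matrices.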
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
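// Compare host and GPU results element-wise within a small epsilon.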
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx)
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
checkCuda(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
checkCuda(hipSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nxy);
initialData(h_B, nxy);
// add matrix at host side for result check
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
checkCuda(hipMalloc((void **)&d_MatA, nBytes));
checkCuda(hipMalloc((void **)&d_MatB, nBytes));
checkCuda(hipMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
checkCuda(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice));
checkCuda(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 256;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
sumMatrixOnGPU1D << <grid, block >> >(d_MatA, d_MatB, d_MatC, nx, ny);
checkCuda(hipDeviceSynchronize());
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y);
// Check kernel error
checkCuda(hipGetLastError());
// copy kernel result back to host side
checkCuda(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
checkCuda(hipFree(d_MatA));
checkCuda(hipFree(d_MatB));
checkCuda(hipFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
checkCuda(hipDeviceReset());
return 0;
}
| ad34774c9ab01174f1ce978e0bb4ee055722c7cc.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// function for checking the CUDA runtime API results.
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
printf("Error: %s : %d, ", __FILE__, __LINE__);
printf("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
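// Fill an array with pseudo-random floats in [0, 25.5].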
void initialData(float *ip, const int size)
{
int i;
for (i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
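// Reference CPU implementation: element-wise sum of two nx-by-ny matrices.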
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
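// Compare host and GPU results element-wise within a small epsilon.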
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx)
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
checkCuda(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
checkCuda(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nxy);
initialData(h_B, nxy);
// add matrix at host side for result check
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
checkCuda(cudaMalloc((void **)&d_MatA, nBytes));
checkCuda(cudaMalloc((void **)&d_MatB, nBytes));
checkCuda(cudaMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
checkCuda(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 256;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
sumMatrixOnGPU1D << <grid, block >> >(d_MatA, d_MatB, d_MatC, nx, ny);
checkCuda(cudaDeviceSynchronize());
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y);
// Check kernel error
checkCuda(cudaGetLastError());
// copy kernel result back to host side
checkCuda(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
checkCuda(cudaFree(d_MatA));
checkCuda(cudaFree(d_MatB));
checkCuda(cudaFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
checkCuda(cudaDeviceReset());
return 0;
}
|
a1ad3e5b0f4aa07e80394380034301c29ccb4b0d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeConvexPolyhedron<128>
template hipError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron<128> >(const hpmc_free_volume_args_t &args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeConvexPolyhedron<128> >(const hpmc_args_t& args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron<128> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron<128> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| a1ad3e5b0f4aa07e80394380034301c29ccb4b0d.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeConvexPolyhedron<128>
template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron<128> >(const hpmc_free_volume_args_t &args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeConvexPolyhedron<128> >(const hpmc_args_t& args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron<128> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron<128> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<128> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
88c062bd43f70dd2701f22c458fe18bdfa340c0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
//#include <conio.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>
using namespace std;
#define M 128
#define N 64
#define MaxThreadPerBlock 1024
#define Blocknum 4096//1time:2^22 18min:2^38 1day:2^44-2^45
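// Scan the results file, keep only the distinct values, rewrite the file, and return how many distinct values were found.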
int64_t checkrepeat(FILE *f,int name,int mode)
{
int64_t num;
int k=0;
int sum=0;
int64_t results[1000000];
fseek(f,0,0);
for(int64_t i=0;!feof(f);i++)
{
k=0;
fscanf(f, "%ld", &num);
for(int64_t t=0;t<sum;t++)
{
if(num==results[t])
{
k=1;
//cout<<num<<endl;
break;
}
}
if(k==0)
{
results[sum]=num;
sum=sum+1;
//cout<<sum<<endl;
}
}
fclose(f);
if(mode==1)
{
if(name==16)
{
f=fopen("16_1.txt","w+");
}
else if(name==18)
{
f=fopen("18_1.txt","w+");
}
else if(name==20)
{
f=fopen("20_1.txt","w+");
}
else if(name==22)
{
f=fopen("22_1.txt","w+");
}
for(int64_t i=0;i<sum;i++)
{
fprintf(f,"%ld ",results[i]);
}
fprintf(f,"\n\n");
fclose(f);
printf("saved\n");
}
if(name==16)
{
f=fopen("16.txt","w+");
}
else if(name==18)
{
f=fopen("18.txt","w+");
}
else if(name==20)
{
f=fopen("20.txt","w+");
}
else if(name==22)
{
f=fopen("22.txt","w+");
}
for(int64_t i=0;i<sum;i++)
{
fprintf(f,"%ld ",results[i]);
}
fprintf(f,"\n\n");
//fclose(f);
return sum;
}
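// Reverse the bit order of a 64-bit integer.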
int64_t reverse(int64_t a)
{
int64_t b=0;
int64_t temp=1;
for(int i=0;i<64;i++)
{
b=b*2+((a>>i)&temp);
}
return b;
}
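// Hamming weight (popcount) of a 64-bit integer via parallel bit summation.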
__device__ int64_t hamming(int64_t n) {
n = (n & 0x5555555555555555) + ((n >> 1) & 0x5555555555555555);
n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);
n = (n & 0x0f0f0f0f0f0f0f0f) + ((n >> 4) & 0x0f0f0f0f0f0f0f0f);
n = (n & 0x00ff00ff00ff00ff) + ((n >> 8) & 0x00ff00ff00ff00ff);
n = (n & 0x0000ffff0000ffff) + ((n >> 16) & 0x0000ffff0000ffff);
n = (n & 0x00000000ffffffff) + ((n >> 32) & 0x00000000ffffffff);
return n;
}
__global__ void simu(int64_t* a, int64_t* b, int* c,int t,int* weight)// simulation kernel: for each candidate b[k], accumulate the parity (mod 2) of b[k] AND each row of a
{
int offset;
offset = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int k;
k= offset / 128 + t * MaxThreadPerBlock * Blocknum / 128;
//c[k * M + offset%128] = hamming(a[offset%128] & b[k])%2;
atomicAdd(&weight[k], hamming(a[offset%128] & b[k])%2);
__syncthreads();
}
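// Each thread builds a random sparse 64-bit candidate vector by setting a small random number of random bit positions.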
__global__ void hbadd(int64_t* b,int64_t rand)
{
int k = 0;
k = threadIdx.x + blockIdx.x * blockDim.x;
b[k] = 0;
int r = 0;
int r1=0;
int t=0;
int64_t weight=0;
hiprandState_t state;
hiprand_init((int64_t)rand*4194304+k,0,0,&state);
r=hiprand(&state)%10+1;
for(int i=0;i<r;i++)
{
while(weight==hamming(b[k])&&t<10)
{
t++;
r1=hiprand(&state)%64;
b[k]=b[k]|((int64_t)1<<r1);
}
t=0;
weight=hamming(b[k]);
}
__syncthreads();
}
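// Zero an int array, one element per thread.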
__global__ void clearzero(int* c)
{
int k = 0;
k = threadIdx.x + blockIdx.x * blockDim.x;
c[k] = 0;
__syncthreads();
}
int main()
{
srand((unsigned int)time(NULL));
hipError_t cudaStatus;
hipDeviceProp_t prop;
FILE* fp_test;
int Num_Device;
// Query the number of GPUs in the system, pick the last one, and read its properties
cudaStatus = hipGetDeviceCount(&Num_Device);
if (cudaStatus != hipSuccess) // no GPU available for computation: report and exit
{
printf("There is no GPU beyond 1.0, exit!\n");
exit(0);
}
else
{
cudaStatus = hipGetDeviceProperties(&prop, Num_Device - 1); // select the last GPU and query its properties
if (cudaStatus != hipSuccess) // device properties could not be read: report and exit
{
printf("Cannot get device properties, exit!\n");
exit(0);
}
}
printf("Device Name : %s.\n", prop.name);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
printf("maxThreadsPerMultiProcessor : %d.\n", prop.maxThreadsPerMultiProcessor);
printf("Blocknum : %d.\n", Blocknum);
int* h_AT = (int*)malloc(sizeof(int) * M * N);
int64_t* h_A = (int64_t*)malloc(sizeof(int64_t) * M);
int64_t* h_A1 = (int64_t*)malloc(sizeof(int64_t) * M);
int64_t* h_B = (int64_t*)malloc(sizeof(int64_t) * Blocknum * MaxThreadPerBlock);
int* h_C = (int*)malloc(sizeof(int) * Blocknum * MaxThreadPerBlock * M);
int* weight = (int*)malloc(sizeof(int) * Blocknum * MaxThreadPerBlock);
int* resultnum = (int*)malloc(sizeof(int) * 23);
for (int i = 0; i < 23; i++)
{
resultnum[i] = 0;
}
fp_test = fopen("G_PAC.txt", "r");
int h_num = 0;
for (int i = 0; i < M * N; i++)
{
fscanf(fp_test, "%d", &h_num);// read one matrix entry (M*N values in total)
h_AT[i] = h_num;
}
fclose(fp_test);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
h_A[i] = 2 * h_A[i] + h_AT[(63-j) * M + i];
//h_A1[i] = 2 * h_A1[i] + h_AT[j* M + i];
}
}
// for (int i = 0; i < Blocknum * MaxThreadPerBlock; i++)
// {
// h_B[i] = i+1;
// }
int64_t* d_A, * d_B;
int* d_C,* d_weight;
hipMalloc((void**)&d_weight, sizeof(int) * Blocknum * MaxThreadPerBlock);
hipMalloc((void**)&d_A, sizeof(int64_t) * M);
//hipMalloc((void**)&d_A1, sizeof(int64_t) * M);
hipMalloc((void**)&d_B, sizeof(int64_t) * Blocknum * MaxThreadPerBlock);
hipMalloc((void**)&d_C, sizeof(int) * Blocknum * MaxThreadPerBlock * M);
//hipMalloc((void**)&d_resultnum, sizeof(int) * 23);
hipMemcpy(d_A, h_A, M * sizeof(int64_t), hipMemcpyHostToDevice);
//hipMemcpy(d_A1, h_A1, M * sizeof(int64_t), hipMemcpyHostToDevice);
//hipMemcpy(d_B, h_B, Blocknum * MaxThreadPerBlock * sizeof(int64_t), hipMemcpyHostToDevice);
//hipMemcpy(d_resultnum, resultnum, 23 * sizeof(int), hipMemcpyHostToDevice);
int64_t k = 0;
clock_t start = clock();
clock_t end;
FILE* fp_16;
FILE* fp_18;
FILE* fp_20;
FILE* fp_22;
fp_16=fopen("16.txt","a+");
fp_18=fopen("18.txt","a+");
fp_20=fopen("20.txt","a+");
fp_22=fopen("22.txt","a+");
fp_test=fopen("results.txt","a");
fprintf(fp_16,"\n\n");
fprintf(fp_18,"\n\n");
fprintf(fp_20,"\n\n");
fprintf(fp_22,"\n\n");
while (k<1024)// run 1024 batches of random candidate vectors
//while(k<256)
{
//cout<<(unsigned int)time(NULL)<<endl<<endl;
hbadd << <Blocknum * MaxThreadPerBlock / 128, 128 >> > (d_B,k+(unsigned int)time(NULL));
for (int i = 0; i < 128; i++)
{
simu << <Blocknum * MaxThreadPerBlock / 128, 128 >> > (d_A, d_B,d_C,i,d_weight);
}
//hipDeviceSynchronize();
//countweight << < Blocknum, MaxThreadPerBlock >> > (d_weight,d_resultnum);
// hipMemcpy(h_C, d_C, Blocknum * MaxThreadPerBlock * (M + 1) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(weight, d_weight, Blocknum * MaxThreadPerBlock * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_B, Blocknum * MaxThreadPerBlock * sizeof(int64_t), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for(int i=0;i < MaxThreadPerBlock * Blocknum;i++)
{
if(weight[i]==16)
{
fprintf(fp_16,"%ld ",h_B[i]);
}
else if(weight[i]==18)
{
fprintf(fp_18,"%ld ",h_B[i]);
}
else if(weight[i]==20)
{
fprintf(fp_20,"%ld ",h_B[i]);
}
else if(weight[i]==22)
{
fprintf(fp_22,"%ld ",h_B[i]);
}
}
clearzero << < Blocknum, MaxThreadPerBlock >> > (d_weight);
if ((k+1)%256==0)
{
//srand((unsigned int)time(NULL));
if ((k+1)%1024)
{
resultnum[16]=checkrepeat(fp_16,16,0);
resultnum[18]=checkrepeat(fp_18,18,0);
resultnum[20]=checkrepeat(fp_20,20,0);
resultnum[22]=checkrepeat(fp_22,22,0);
}
else
{
resultnum[16]=checkrepeat(fp_16,16,1);
resultnum[18]=checkrepeat(fp_18,18,1);
resultnum[20]=checkrepeat(fp_20,20,1);
resultnum[22]=checkrepeat(fp_22,22,1);
}
end = clock();
printf("\ntime=%f min\n", (double)(end - start) / CLOCKS_PER_SEC/60);
//hipMemcpy(resultnum, d_resultnum, 23 * sizeof(int), hipMemcpyDeviceToHost);
fprintf(fp_test,"k = %ld\n",k);
for (int i = 0; i < 23; i++)
{
cout << resultnum[i] << " ";
fprintf(fp_test,"%d ",resultnum[i]);
}
cout << endl;
fprintf(fp_test,"\n");
cout << k << endl;
fclose(fp_test);
fp_test=fopen("results.txt","a");
}
k++;
//cout<<k<<endl;
}
resultnum[16]=checkrepeat(fp_16,16,1);
resultnum[18]=checkrepeat(fp_18,18,1);
resultnum[20]=checkrepeat(fp_20,20,1);
resultnum[22]=checkrepeat(fp_22,22,1);
end = clock();
printf("\ntime=%f min\n", (double)(end - start) / CLOCKS_PER_SEC/60);
hipMemcpy(h_B, d_B, Blocknum * MaxThreadPerBlock * sizeof(int64_t), hipMemcpyDeviceToHost);
hipMemcpy(h_C, d_C, Blocknum * MaxThreadPerBlock * M * sizeof(int), hipMemcpyDeviceToHost);
cout << h_B[Blocknum * MaxThreadPerBlock - 1] << endl;
//hipMemcpy(resultnum, d_resultnum, 23 * sizeof(int), hipMemcpyDeviceToHost);
cout<<endl;
// for(int i=0;i<1024;i++)
// {
// //fprintf(fp_test,"%ld ",h_B[i]);
// cout<<h_B[i]<<endl;
// }
// cout<<endl;
// for(int i=0;i<2000;i++)
// {
// cout<<h_C[i]<<" ";
// if((i+1)%128==0)
// {
// cout<<endl<<endl;
// }
// }
fprintf(fp_test,"k = %ld\n",k);
for (int i = 0; i < 23; i++)
{
cout << resultnum[i] << " ";
fprintf(fp_test,"%d ",resultnum[i]);
}
fprintf(fp_test,"\n");
cout << endl;
fclose(fp_16);
fclose(fp_18);
fclose(fp_20);
fclose(fp_22);
fclose(fp_test);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_AT);
free(h_B);
free(h_C);
return 0;
} | 88c062bd43f70dd2701f22c458fe18bdfa340c0a.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
//#include <conio.h>
#include <time.h>
#include <curand_kernel.h>
using namespace std;
#define M 128
#define N 64
#define MaxThreadPerBlock 1024
#define Blocknum 4096//1time:2^22 18min:2^38 1day:2^44-2^45
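// Scan the results file, keep only the distinct values, rewrite the file, and return how many distinct values were found.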
int64_t checkrepeat(FILE *f,int name,int mode)
{
int64_t num;
int k=0;
int sum=0;
int64_t results[1000000];
fseek(f,0,0);
for(int64_t i=0;!feof(f);i++)
{
k=0;
fscanf(f, "%ld", &num);
for(int64_t t=0;t<sum;t++)
{
if(num==results[t])
{
k=1;
//cout<<num<<endl;
break;
}
}
if(k==0)
{
results[sum]=num;
sum=sum+1;
//cout<<sum<<endl;
}
}
fclose(f);
if(mode==1)
{
if(name==16)
{
f=fopen("16_1.txt","w+");
}
else if(name==18)
{
f=fopen("18_1.txt","w+");
}
else if(name==20)
{
f=fopen("20_1.txt","w+");
}
else if(name==22)
{
f=fopen("22_1.txt","w+");
}
for(int64_t i=0;i<sum;i++)
{
fprintf(f,"%ld ",results[i]);
}
fprintf(f,"\n\n");
fclose(f);
printf("saved\n");
}
if(name==16)
{
f=fopen("16.txt","w+");
}
else if(name==18)
{
f=fopen("18.txt","w+");
}
else if(name==20)
{
f=fopen("20.txt","w+");
}
else if(name==22)
{
f=fopen("22.txt","w+");
}
for(int64_t i=0;i<sum;i++)
{
fprintf(f,"%ld ",results[i]);
}
fprintf(f,"\n\n");
//fclose(f);
return sum;
}
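// Reverse the bit order of a 64-bit integer.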
int64_t reverse(int64_t a)
{
int64_t b=0;
int64_t temp=1;
for(int i=0;i<64;i++)
{
b=b*2+((a>>i)&temp);
}
return b;
}
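// Hamming weight (popcount) of a 64-bit integer via parallel bit summation.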
__device__ int64_t hamming(int64_t n) {
n = (n & 0x5555555555555555) + ((n >> 1) & 0x5555555555555555);
n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);
n = (n & 0x0f0f0f0f0f0f0f0f) + ((n >> 4) & 0x0f0f0f0f0f0f0f0f);
n = (n & 0x00ff00ff00ff00ff) + ((n >> 8) & 0x00ff00ff00ff00ff);
n = (n & 0x0000ffff0000ffff) + ((n >> 16) & 0x0000ffff0000ffff);
n = (n & 0x00000000ffffffff) + ((n >> 32) & 0x00000000ffffffff);
return n;
}
__global__ void simu(int64_t* a, int64_t* b, int* c,int t,int* weight)// simulation kernel: for each candidate b[k], accumulate the parity (mod 2) of b[k] AND each row of a
{
int offset;
offset = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int k;
k= offset / 128 + t * MaxThreadPerBlock * Blocknum / 128;
//c[k * M + offset%128] = hamming(a[offset%128] & b[k])%2;
atomicAdd(&weight[k], hamming(a[offset%128] & b[k])%2);
__syncthreads();
}
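// Each thread builds a random sparse 64-bit candidate vector by setting a small random number of random bit positions.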
__global__ void hbadd(int64_t* b,int64_t rand)
{
int k = 0;
k = threadIdx.x + blockIdx.x * blockDim.x;
b[k] = 0;
int r = 0;
int r1=0;
int t=0;
int64_t weight=0;
curandState state;
curand_init((int64_t)rand*4194304+k,0,0,&state);
r=curand(&state)%10+1;
for(int i=0;i<r;i++)
{
while(weight==hamming(b[k])&&t<10)
{
t++;
r1=curand(&state)%64;
b[k]=b[k]|((int64_t)1<<r1);
}
t=0;
weight=hamming(b[k]);
}
__syncthreads();
}
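// Zero an int array, one element per thread.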
__global__ void clearzero(int* c)
{
int k = 0;
k = threadIdx.x + blockIdx.x * blockDim.x;
c[k] = 0;
__syncthreads();
}
int main()
{
srand((unsigned int)time(NULL));
cudaError_t cudaStatus;
cudaDeviceProp prop;
FILE* fp_test;
int Num_Device;
// Query the number of GPUs in the system, pick the last one, and read its properties
cudaStatus = cudaGetDeviceCount(&Num_Device);
if (cudaStatus != cudaSuccess) // no GPU available for computation: report and exit
{
printf("There is no GPU beyond 1.0, exit!\n");
exit(0);
}
else
{
cudaStatus = cudaGetDeviceProperties(&prop, Num_Device - 1); // select the last GPU and query its properties
if (cudaStatus != cudaSuccess) // device properties could not be read: report and exit
{
printf("Cannot get device properties, exit!\n");
exit(0);
}
}
printf("Device Name : %s.\n", prop.name);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
printf("maxThreadsPerMultiProcessor : %d.\n", prop.maxThreadsPerMultiProcessor);
printf("Blocknum : %d.\n", Blocknum);
int* h_AT = (int*)malloc(sizeof(int) * M * N);
int64_t* h_A = (int64_t*)malloc(sizeof(int64_t) * M);
int64_t* h_A1 = (int64_t*)malloc(sizeof(int64_t) * M);
int64_t* h_B = (int64_t*)malloc(sizeof(int64_t) * Blocknum * MaxThreadPerBlock);
int* h_C = (int*)malloc(sizeof(int) * Blocknum * MaxThreadPerBlock * M);
int* weight = (int*)malloc(sizeof(int) * Blocknum * MaxThreadPerBlock);
int* resultnum = (int*)malloc(sizeof(int) * 23);
for (int i = 0; i < 23; i++)
{
resultnum[i] = 0;
}
fp_test = fopen("G_PAC.txt", "r");
int h_num = 0;
for (int i = 0; i < M * N; i++)
{
fscanf(fp_test, "%d", &h_num);// read one matrix entry (M*N values in total)
h_AT[i] = h_num;
}
fclose(fp_test);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
h_A[i] = 2 * h_A[i] + h_AT[(63-j) * M + i];
//h_A1[i] = 2 * h_A1[i] + h_AT[j* M + i];
}
}
// for (int i = 0; i < Blocknum * MaxThreadPerBlock; i++)
// {
// h_B[i] = i+1;
// }
int64_t* d_A, * d_B;
int* d_C,* d_weight;
cudaMalloc((void**)&d_weight, sizeof(int) * Blocknum * MaxThreadPerBlock);
cudaMalloc((void**)&d_A, sizeof(int64_t) * M);
//cudaMalloc((void**)&d_A1, sizeof(int64_t) * M);
cudaMalloc((void**)&d_B, sizeof(int64_t) * Blocknum * MaxThreadPerBlock);
cudaMalloc((void**)&d_C, sizeof(int) * Blocknum * MaxThreadPerBlock * M);
//cudaMalloc((void**)&d_resultnum, sizeof(int) * 23);
cudaMemcpy(d_A, h_A, M * sizeof(int64_t), cudaMemcpyHostToDevice);
//cudaMemcpy(d_A1, h_A1, M * sizeof(int64_t), cudaMemcpyHostToDevice);
//cudaMemcpy(d_B, h_B, Blocknum * MaxThreadPerBlock * sizeof(int64_t), cudaMemcpyHostToDevice);
//cudaMemcpy(d_resultnum, resultnum, 23 * sizeof(int), cudaMemcpyHostToDevice);
int64_t k = 0;
clock_t start = clock();
clock_t end;
FILE* fp_16;
FILE* fp_18;
FILE* fp_20;
FILE* fp_22;
fp_16=fopen("16.txt","a+");
fp_18=fopen("18.txt","a+");
fp_20=fopen("20.txt","a+");
fp_22=fopen("22.txt","a+");
fp_test=fopen("results.txt","a");
fprintf(fp_16,"\n\n");
fprintf(fp_18,"\n\n");
fprintf(fp_20,"\n\n");
fprintf(fp_22,"\n\n");
while (k<1024)// run 1024 batches of random candidate vectors
//while(k<256)
{
//cout<<(unsigned int)time(NULL)<<endl<<endl;
hbadd << <Blocknum * MaxThreadPerBlock / 128, 128 >> > (d_B,k+(unsigned int)time(NULL));
for (int i = 0; i < 128; i++)
{
simu << <Blocknum * MaxThreadPerBlock / 128, 128 >> > (d_A, d_B,d_C,i,d_weight);
}
//cudaThreadSynchronize();
//countweight << < Blocknum, MaxThreadPerBlock >> > (d_weight,d_resultnum);
// cudaMemcpy(h_C, d_C, Blocknum * MaxThreadPerBlock * (M + 1) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(weight, d_weight, Blocknum * MaxThreadPerBlock * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_B, Blocknum * MaxThreadPerBlock * sizeof(int64_t), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
for(int i=0;i < MaxThreadPerBlock * Blocknum;i++)
{
if(weight[i]==16)
{
fprintf(fp_16,"%ld ",h_B[i]);
}
else if(weight[i]==18)
{
fprintf(fp_18,"%ld ",h_B[i]);
}
else if(weight[i]==20)
{
fprintf(fp_20,"%ld ",h_B[i]);
}
else if(weight[i]==22)
{
fprintf(fp_22,"%ld ",h_B[i]);
}
}
clearzero << < Blocknum, MaxThreadPerBlock >> > (d_weight);
if ((k+1)%256==0)
{
//srand((unsigned int)time(NULL));
if ((k+1)%1024)
{
resultnum[16]=checkrepeat(fp_16,16,0);
resultnum[18]=checkrepeat(fp_18,18,0);
resultnum[20]=checkrepeat(fp_20,20,0);
resultnum[22]=checkrepeat(fp_22,22,0);
}
else
{
resultnum[16]=checkrepeat(fp_16,16,1);
resultnum[18]=checkrepeat(fp_18,18,1);
resultnum[20]=checkrepeat(fp_20,20,1);
resultnum[22]=checkrepeat(fp_22,22,1);
}
end = clock();
printf("\ntime=%f min\n", (double)(end - start) / CLOCKS_PER_SEC/60);
//cudaMemcpy(resultnum, d_resultnum, 23 * sizeof(int), cudaMemcpyDeviceToHost);
fprintf(fp_test,"k = %ld\n",k);
for (int i = 0; i < 23; i++)
{
cout << resultnum[i] << " ";
fprintf(fp_test,"%d ",resultnum[i]);
}
cout << endl;
fprintf(fp_test,"\n");
cout << k << endl;
fclose(fp_test);
fp_test=fopen("results.txt","a");
}
k++;
//cout<<k<<endl;
}
resultnum[16]=checkrepeat(fp_16,16,1);
resultnum[18]=checkrepeat(fp_18,18,1);
resultnum[20]=checkrepeat(fp_20,20,1);
resultnum[22]=checkrepeat(fp_22,22,1);
end = clock();
printf("\ntime=%f min\n", (double)(end - start) / CLOCKS_PER_SEC/60);
cudaMemcpy(h_B, d_B, Blocknum * MaxThreadPerBlock * sizeof(int64_t), cudaMemcpyDeviceToHost);
cudaMemcpy(h_C, d_C, Blocknum * MaxThreadPerBlock * M * sizeof(int), cudaMemcpyDeviceToHost);
cout << h_B[Blocknum * MaxThreadPerBlock - 1] << endl;
//cudaMemcpy(resultnum, d_resultnum, 23 * sizeof(int), cudaMemcpyDeviceToHost);
cout<<endl;
// for(int i=0;i<1024;i++)
// {
// //fprintf(fp_test,"%ld ",h_B[i]);
// cout<<h_B[i]<<endl;
// }
// cout<<endl;
// for(int i=0;i<2000;i++)
// {
// cout<<h_C[i]<<" ";
// if((i+1)%128==0)
// {
// cout<<endl<<endl;
// }
// }
fprintf(fp_test,"k = %ld\n",k);
for (int i = 0; i < 23; i++)
{
cout << resultnum[i] << " ";
fprintf(fp_test,"%d ",resultnum[i]);
}
fprintf(fp_test,"\n");
cout << endl;
fclose(fp_16);
fclose(fp_18);
fclose(fp_20);
fclose(fp_22);
fclose(fp_test);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_AT);
free(h_B);
free(h_C);
return 0;
} |
426a5f93fd74d9fcd3a1b6f419c0bed397f43ad8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// CUDA example: illustrates kernel-allocated shared memory; does
// nothing useful, just copying an array from host to device global,
// then to device shared, doubling it there, then copying back to device
// global then host
__global__ void doubleit(int *dv, int n)
{ extern __shared__ int sv[];
int me = threadIdx.x;
// threads share in copying dv to sv, with each thread copying one
// element
sv[me] = dv[me];
sv[me] = 2 * sv[me];
dv[me] = sv[me];
}
int main(int argc, char **argv)
{
int n = atoi(argv[1]); // number of matrix rows/cols
int *hv, // host array
*dv; // device array
int vsize = n * sizeof(int); // size of array in bytes
// allocate space for host array
hv = (int *) malloc(vsize);
// fill test array with consecutive integers
int t = 0,i;
for (i = 0; i < n; i++)
hv[i] = t++;
// allocate space for device array
hipMalloc((void **)&dv,vsize);
// copy host array to device array
hipMemcpy(dv,hv,vsize,hipMemcpyHostToDevice);
// set up parameters for threads structure
dim3 dimGrid(1,1);
dim3 dimBlock(n,1,1); // all n threads in the same block
// invoke the kernel; third argument is amount of shared memory
hipLaunchKernelGGL(( doubleit), dim3(dimGrid),dim3(dimBlock),vsize, 0, dv,n);
// wait for kernel to finish
hipDeviceSynchronize();
// copy result array from device to host
hipMemcpy(hv,dv,vsize,hipMemcpyDeviceToHost);
// check results
if (n < 10) for(int i=0; i<n; i++) printf("%d\n",hv[i]);
// clean up
free(hv);
hipFree(dv);
}
| 426a5f93fd74d9fcd3a1b6f419c0bed397f43ad8.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// CUDA example: illustrates kernel-allocated shared memory; does
// nothing useful, just copying an array from host to device global,
// then to device shared, doubling it there, then copying back to device
// global then host
__global__ void doubleit(int *dv, int n)
{ extern __shared__ int sv[];
int me = threadIdx.x;
// threads share in copying dv to sv, with each thread copying one
// element
sv[me] = dv[me];
sv[me] = 2 * sv[me];
dv[me] = sv[me];
}
int main(int argc, char **argv)
{
int n = atoi(argv[1]); // number of matrix rows/cols
int *hv, // host array
*dv; // device array
int vsize = n * sizeof(int); // size of array in bytes
// allocate space for host array
hv = (int *) malloc(vsize);
// fill test array with consecutive integers
int t = 0,i;
for (i = 0; i < n; i++)
hv[i] = t++;
// allocate space for device array
cudaMalloc((void **)&dv,vsize);
// copy host array to device array
cudaMemcpy(dv,hv,vsize,cudaMemcpyHostToDevice);
// set up parameters for threads structure
dim3 dimGrid(1,1);
dim3 dimBlock(n,1,1); // all n threads in the same block
// invoke the kernel; third argument is amount of shared memory
doubleit<<<dimGrid,dimBlock,vsize>>>(dv,n);
// wait for kernel to finish
cudaDeviceSynchronize();
// copy result array from device to host
cudaMemcpy(hv,dv,vsize,cudaMemcpyDeviceToHost);
// check results
if (n < 10) for(int i=0; i<n; i++) printf("%d\n",hv[i]);
// clean up
free(hv);
cudaFree(dv);
}
|
41f2d84f808453138f35a861de549218e470b60b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This code is copied from https://github.com/msracver/Deep-Image-Analogy
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#define FLT_MIN 1.175494351e-38F
__host__ __device__ int clamp(int x, int x_max, int x_min) {//assume x_max >= x_min
if (x > x_max)
{
return x_max;
}
else if (x < x_min)
{
return x_min;
}
else
{
return x;
}
}
__host__ __device__ unsigned int XY_TO_INT(int x, int y) {//r represents the number of 10-degree steps; x and y use 11 bits each (max 2047); r max = 36 (6 bits)
return (((y) << 11) | (x));
}
__host__ __device__ int INT_TO_X(unsigned int v) {
return (v)&((1 << 11) - 1);
}
__host__ __device__ int INT_TO_Y(unsigned int v) {
return (v >> 11)&((1 << 11) - 1);
}
__host__ __device__ int cuMax(int a, int b) {
if (a > b) {
return a;
}
else {
return b;
}
}
__host__ __device__ int cuMin(int a, int b) {
if (a < b) {
return a;
}
else {
return b;
}
}
__device__ float MycuRand(hiprandState_t &state) {//random number in cuda, between 0 and 1
return hiprand_uniform(&state);
}
__device__ void InitcuRand(hiprandState_t &state) {//initialize the per-thread random number generator state
int i = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(i, 0, 0, &state);
}
__host__ __device__ float dist_compute(float * a, float * b, float * a1, float * b1, int channels, int a_rows, int a_cols, int b_rows, int b_cols, int ax, int ay, int bx, int by, int patch_w, float cutoff = INT_MAX) {//returns the average distance over the pixels actually compared; patch_w is assumed to be odd
float pixel_sum = 0, pixel_no = 0, pixel_dist = 0;//number of pixels really counted
float pixel_sum1 = 0;
int a_slice = a_rows*a_cols, b_slice = b_rows*b_cols;
int a_pitch = a_cols, b_pitch = b_cols;
float dp_tmp;
for (int dy = -patch_w / 2; dy <= patch_w / 2; dy++) {
for (int dx = -patch_w / 2; dx <= patch_w / 2; dx++) {
if (
(ay + dy) < a_rows && (ay + dy) >= 0 && (ax + dx) < a_cols && (ax + dx) >= 0
&&
(by + dy) < b_rows && (by + dy) >= 0 && (bx + dx) < b_cols && (bx + dx) >= 0
)//the pixel in a should exist and pixel in b should exist
{
if (channels == 3)
{
for (int dc = 0; dc < channels; dc++)
{
dp_tmp = a[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] - b[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum += dp_tmp * dp_tmp;
dp_tmp = a1[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] - b1[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum1 += dp_tmp * dp_tmp;
}
}
else
{
for (int dc = 0; dc < channels; dc++)
{
dp_tmp = a[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] * b[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum -= dp_tmp;
dp_tmp = a1[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] * b1[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum1 -= dp_tmp;
}
}
pixel_no += 1;
}
}
}
pixel_dist = (pixel_sum + pixel_sum1) / pixel_no;
if (pixel_dist >= cutoff) { return cutoff; }
else {
return pixel_dist;
}
}
__host__ __device__ float dist(float * a, float * b, float *a1, float *b1, int channels, int a_rows, int a_cols, int b_rows, int b_cols, int ax, int ay, int xp, int yp, int patch_w, float cutoff = INT_MAX) {
return dist_compute(a, b, a1, b1, channels, a_rows, a_cols, b_rows, b_cols, ax, ay, xp, yp, patch_w, cutoff);
}
__device__ void improve_guess(float * a, float * b, float *a1, float *b1, int channels, int a_rows, int a_cols, int b_rows, int b_cols, int ax, int ay, int &xbest, int &ybest, float &dbest, int xp, int yp, int patch_w, float rr) {
float d;
d = dist(a, b, a1, b1, channels, a_rows, a_cols, b_rows, b_cols, ax, ay, xp, yp, patch_w);
if (d + rr < dbest) {
xbest = xp;
ybest = yp;
dbest = d;
}
}
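// Initialize the nearest-neighbour field (NNF) to the identity mapping.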
__global__ void initialAnn_kernel(unsigned int * ann, int * params) {
//just use 7 of 9 parameters
int ah = params[1];
int aw = params[2];
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
if (ax < aw && ay < ah) {
int bx = ax;
int by = ay;
ann[ay*aw + ax] = XY_TO_INT(bx, by);
}
}
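// Upsample a coarse NNF to a finer resolution, scaling the offsets and clamping them to the image bounds.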
__global__ void upSample_kernel(unsigned int * ann, unsigned int * ann_tmp,int * params, int aw_half,int ah_half) {
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int ah = params[1];
int aw = params[2];
int bh = params[3];
int bw = params[4];
float aw_ratio = (float)aw / (float)aw_half;
float ah_ratio = (float)ah / (float)ah_half;
int ax_half = (ax+0.5) / aw_ratio;
int ay_half = (ay+0.5) / ah_ratio;
ax_half = clamp(ax_half, aw_half - 1, 0);
ay_half = clamp(ay_half, ah_half - 1, 0);
if (ax < aw&&ay < ah) {
unsigned int v_half = ann[ay_half*aw_half + ax_half];
int bx_half = INT_TO_X(v_half);
int by_half = INT_TO_Y(v_half);
int bx = ax + (bx_half - ax_half)*aw_ratio + 0.5;
int by = ay + (by_half - ay_half)*ah_ratio + 0.5;
bx = clamp(bx, bw-1, 0);
by = clamp(by, bh-1, 0);
ann_tmp[ay*aw + ax] = XY_TO_INT(bx, by);
}
}
extern "C"
__global__ void patch_match(float * a, float * b, float *a1, float *b1, unsigned int *ann, float *annd, int ch, int a_rows, int a_cols, int b_rows, int b_cols, int patch_w, int pm_iters, int rs_max) {
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
if (ax < a_cols && ay < a_rows) {
// for random number
hiprandState_t state;
InitcuRand(state);
unsigned int v, vp;
int xp, yp, xbest, ybest;
int xmin, xmax, ymin, ymax;
float dbest;
v = ann[ay*a_cols + ax];
xbest = INT_TO_X(v), ybest = INT_TO_Y(v);
annd[ay*a_cols + ax] = dist(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, patch_w);
for (int iter = 0; iter < pm_iters; iter++) {
/* Current (best) guess. */
v = ann[ay*a_cols + ax];
xbest = INT_TO_X(v), ybest = INT_TO_Y(v);
dbest = annd[ay*a_cols + ax];
/* In each iteration, improve the NNF by jump flooding. */
for (int jump = 8; jump > 0; jump /= 2) {
/* Propagation: Improve current guess by trying instead correspondences from left, right, up and down. */
if ((ax - jump) < a_cols && (ax - jump) >= 0)//left
{
vp = ann[ay*a_cols + ax - jump];//the pixel coordinates in image b
xp = INT_TO_X(vp) + jump, yp = INT_TO_Y(vp);//the propagated match from vp, the center of the patch, which should be in the image
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
if ((ax + jump) < a_cols)//right
{
vp = ann[ay*a_cols + ax + jump];//the pixel coordinates in image b
xp = INT_TO_X(vp) - jump, yp = INT_TO_Y(vp);
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
if ((ay - jump) < a_rows && (ay - jump) >= 0)//up
{
vp = ann[(ay - jump)*a_cols + ax];//the pixel coordinates in image b
xp = INT_TO_X(vp), yp = INT_TO_Y(vp) + jump;
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
if ((ay + jump) < a_rows)//down
{
vp = ann[(ay + jump)*a_cols + ax];//the pixel coordinates in image b
xp = INT_TO_X(vp), yp = INT_TO_Y(vp) - jump;
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
}
/* Random search: Improve current guess by searching in boxes of exponentially decreasing size around the current best guess. */
int rs_start = rs_max;
for (int mag = rs_start; mag >= 1; mag /= 2) {
/* Sampling window */
xmin = cuMax(xbest - mag, 0), xmax = cuMin(xbest + mag + 1, b_cols);
ymin = cuMax(ybest - mag, 0), ymax = cuMin(ybest + mag + 1, b_rows);
xp = xmin + (int)(MycuRand(state)*(xmax - xmin)) % (xmax - xmin);
yp = ymin + (int)(MycuRand(state)*(ymax - ymin)) % (ymax - ymin);
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, FLT_MIN);
}
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
__syncthreads();
}
}
}
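// Blend old and new feature maps: where the confidence map exceeds a small threshold, mix the old value in with the given weight; otherwise keep the new value.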
__global__ void blend(float *cmap, float* oldd, float* newd, float weight,int * params)
{
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int ch = params[0];
int ah = params[1];
int aw = params[2];
int slice_a = ah * aw;
int pitch_a = aw;
float thre = 0.05;
if (ax < aw&& ay < ah)
{
float fa = cmap[ay*pitch_a + ax];
if (fa < thre)
fa = 0.0f;
else fa = weight;
for (int i = 0; i < ch; i++)
{
newd[i*slice_a + ay*pitch_a + ax] = oldd[i*slice_a + ay*pitch_a + ax]* fa + newd[i*slice_a + ay*pitch_a + ax] * (1.0-fa);
}
}
}
// ********** VOTE ***********
extern "C"
__global__ void avg_vote(unsigned int * ann, float * pb, float * pc, int ch, int ah, int aw, int bh, int bw, int patch_w) {
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int slice_a = ah * aw;
int pitch_a = aw;
int slice_b = bh * bw;
int pitch_b = bw;
int count = 0;
if (ax < aw&&ay < ah)
{
//set zero for all the channels at (ax,ay)
for (int i = 0; i < ch; i++)
{
pc[i*slice_a + ay*pitch_a + ax] = 0;
}
//count the sum of all the possible value of (ax,ay)
for (int dx = -patch_w / 2; dx <= patch_w / 2; dx++) {
for (int dy = -patch_w / 2; dy <= patch_w / 2; dy++)
{
if ((ax + dx) < aw && (ax + dx) >= 0 && (ay + dy) < ah && (ay + dy) >= 0)
{
unsigned int vp = ann[(ay + dy)*aw + ax + dx];
int xp = INT_TO_X(vp);
int yp = INT_TO_Y(vp);
if ((xp - dx) < bw && (xp - dx) >= 0 && (yp - dy) < bh && (yp - dy) >= 0)
{
count++;
for (int dc = 0; dc < ch; dc++)
{
pc[dc*slice_a + ay*pitch_a + ax] += pb[dc*slice_b + (yp - dy)*pitch_b + xp - dx];
}
}
}
}
}
//count average value
for (int i = 0; i < ch; i++)
{
pc[i*slice_a + ay*pitch_a + ax] /= count;
}
}
}
| 41f2d84f808453138f35a861de549218e470b60b.cu | // This code is copied from https://github.com/msracver/Deep-Image-Analogy
#include <stdio.h>
#include <curand_kernel.h>
#define FLT_MIN 1.175494351e-38F
__host__ __device__ int clamp(int x, int x_max, int x_min) {//assume x_max >= x_min
if (x > x_max)
{
return x_max;
}
else if (x < x_min)
{
return x_min;
}
else
{
return x;
}
}
__host__ __device__ unsigned int XY_TO_INT(int x, int y) {//r represents the number of 10-degree steps; x and y use 11 bits each (max 2047); r max = 36 (6 bits)
return (((y) << 11) | (x));
}
__host__ __device__ int INT_TO_X(unsigned int v) {
return (v)&((1 << 11) - 1);
}
__host__ __device__ int INT_TO_Y(unsigned int v) {
return (v >> 11)&((1 << 11) - 1);
}
__host__ __device__ int cuMax(int a, int b) {
if (a > b) {
return a;
}
else {
return b;
}
}
__host__ __device__ int cuMin(int a, int b) {
if (a < b) {
return a;
}
else {
return b;
}
}
__device__ float MycuRand(curandState &state) {//random number in cuda, between 0 and 1
return curand_uniform(&state);
}
__device__ void InitcuRand(curandState &state) {//initialize the per-thread random number generator state
int i = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(i, 0, 0, &state);
}
__host__ __device__ float dist_compute(float * a, float * b, float * a1, float * b1, int channels, int a_rows, int a_cols, int b_rows, int b_cols, int ax, int ay, int bx, int by, int patch_w, float cutoff = INT_MAX) {//returns the average distance over the pixels actually compared; patch_w is assumed to be odd
float pixel_sum = 0, pixel_no = 0, pixel_dist = 0;//number of pixels really counted
float pixel_sum1 = 0;
int a_slice = a_rows*a_cols, b_slice = b_rows*b_cols;
int a_pitch = a_cols, b_pitch = b_cols;
float dp_tmp;
for (int dy = -patch_w / 2; dy <= patch_w / 2; dy++) {
for (int dx = -patch_w / 2; dx <= patch_w / 2; dx++) {
if (
(ay + dy) < a_rows && (ay + dy) >= 0 && (ax + dx) < a_cols && (ax + dx) >= 0
&&
(by + dy) < b_rows && (by + dy) >= 0 && (bx + dx) < b_cols && (bx + dx) >= 0
)//the pixel in a should exist and pixel in b should exist
{
if (channels == 3)
{
for (int dc = 0; dc < channels; dc++)
{
dp_tmp = a[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] - b[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum += dp_tmp * dp_tmp;
dp_tmp = a1[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] - b1[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum1 += dp_tmp * dp_tmp;
}
}
else
{
for (int dc = 0; dc < channels; dc++)
{
dp_tmp = a[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] * b[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum -= dp_tmp;
dp_tmp = a1[dc * a_slice + (ay + dy) * a_pitch + (ax + dx)] * b1[dc * b_slice + (by + dy) * b_pitch + (bx + dx)];
pixel_sum1 -= dp_tmp;
}
}
pixel_no += 1;
}
}
}
pixel_dist = (pixel_sum + pixel_sum1) / pixel_no;
if (pixel_dist >= cutoff) { return cutoff; }
else {
return pixel_dist;
}
}
__host__ __device__ float dist(float * a, float * b, float *a1, float *b1, int channels, int a_rows, int a_cols, int b_rows, int b_cols, int ax, int ay, int xp, int yp, int patch_w, float cutoff = INT_MAX) {
return dist_compute(a, b, a1, b1, channels, a_rows, a_cols, b_rows, b_cols, ax, ay, xp, yp, patch_w, cutoff);
}
__device__ void improve_guess(float * a, float * b, float *a1, float *b1, int channels, int a_rows, int a_cols, int b_rows, int b_cols, int ax, int ay, int &xbest, int &ybest, float &dbest, int xp, int yp, int patch_w, float rr) {
float d;
d = dist(a, b, a1, b1, channels, a_rows, a_cols, b_rows, b_cols, ax, ay, xp, yp, patch_w);
if (d + rr < dbest) {
xbest = xp;
ybest = yp;
dbest = d;
}
}
__global__ void initialAnn_kernel(unsigned int * ann, int * params) {
//just use 7 of 9 parameters
int ah = params[1];
int aw = params[2];
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
if (ax < aw && ay < ah) {
int bx = ax;
int by = ay;
ann[ay*aw + ax] = XY_TO_INT(bx, by);
}
}
__global__ void upSample_kernel(unsigned int * ann, unsigned int * ann_tmp,int * params, int aw_half,int ah_half) {
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int ah = params[1];
int aw = params[2];
int bh = params[3];
int bw = params[4];
float aw_ratio = (float)aw / (float)aw_half;
float ah_ratio = (float)ah / (float)ah_half;
int ax_half = (ax+0.5) / aw_ratio;
int ay_half = (ay+0.5) / ah_ratio;
ax_half = clamp(ax_half, aw_half - 1, 0);
ay_half = clamp(ay_half, ah_half - 1, 0);
if (ax < aw&&ay < ah) {
unsigned int v_half = ann[ay_half*aw_half + ax_half];
int bx_half = INT_TO_X(v_half);
int by_half = INT_TO_Y(v_half);
int bx = ax + (bx_half - ax_half)*aw_ratio + 0.5;
int by = ay + (by_half - ay_half)*ah_ratio + 0.5;
bx = clamp(bx, bw-1, 0);
by = clamp(by, bh-1, 0);
ann_tmp[ay*aw + ax] = XY_TO_INT(bx, by);
}
}
extern "C"
__global__ void patch_match(float * a, float * b, float *a1, float *b1, unsigned int *ann, float *annd, int ch, int a_rows, int a_cols, int b_rows, int b_cols, int patch_w, int pm_iters, int rs_max) {
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
if (ax < a_cols && ay < a_rows) {
// for random number
curandState state;
InitcuRand(state);
unsigned int v, vp;
int xp, yp, xbest, ybest;
int xmin, xmax, ymin, ymax;
float dbest;
v = ann[ay*a_cols + ax];
xbest = INT_TO_X(v), ybest = INT_TO_Y(v);
annd[ay*a_cols + ax] = dist(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, patch_w);
for (int iter = 0; iter < pm_iters; iter++) {
/* Current (best) guess. */
v = ann[ay*a_cols + ax];
xbest = INT_TO_X(v), ybest = INT_TO_Y(v);
dbest = annd[ay*a_cols + ax];
/* In each iteration, improve the NNF, by jumping flooding. */
for (int jump = 8; jump > 0; jump /= 2) {
/* Propagation: Improve current guess by trying instead correspondences from left, right, up and downs. */
if ((ax - jump) < a_cols && (ax - jump) >= 0)//left
{
vp = ann[ay*a_cols + ax - jump];//the pixel coordinates in image b
xp = INT_TO_X(vp) + jump, yp = INT_TO_Y(vp);//the propagated match from vp, the center of the patch, which should be in the image
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
if ((ax + jump) < a_cols)//right
{
vp = ann[ay*a_cols + ax + jump];//the pixel coordinates in image b
xp = INT_TO_X(vp) - jump, yp = INT_TO_Y(vp);
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
if ((ay - jump) < a_rows && (ay - jump) >= 0)//up
{
vp = ann[(ay - jump)*a_cols + ax];//the pixel coordinates in image b
xp = INT_TO_X(vp), yp = INT_TO_Y(vp) + jump;
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
if ((ay + jump) < a_rows)//down
{
vp = ann[(ay + jump)*a_cols + ax];//the pixel coordinates in image b
xp = INT_TO_X(vp), yp = INT_TO_Y(vp) - jump;
if (yp >= 0 && yp < b_rows && xp >= 0 && xp < b_cols)
{
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, 0);
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
}
}
}
/* Random search: Improve current guess by searching in boxes of exponentially decreasing size around the current best guess. */
int rs_start = rs_max;
for (int mag = rs_start; mag >= 1; mag /= 2) {
/* Sampling window */
xmin = cuMax(xbest - mag, 0), xmax = cuMin(xbest + mag + 1, b_cols);
ymin = cuMax(ybest - mag, 0), ymax = cuMin(ybest + mag + 1, b_rows);
xp = xmin + (int)(MycuRand(state)*(xmax - xmin)) % (xmax - xmin);
yp = ymin + (int)(MycuRand(state)*(ymax - ymin)) % (ymax - ymin);
//improve guess
improve_guess(a, b, a1, b1, ch, a_rows, a_cols, b_rows, b_cols, ax, ay, xbest, ybest, dbest, xp, yp, patch_w, FLT_MIN);
}
ann[ay*a_cols + ax] = XY_TO_INT(xbest, ybest);
annd[ay*a_cols + ax] = dbest;
__syncthreads();
}
}
}
__global__ void blend(float *cmap, float* oldd, float* newd, float weight,int * params)
{
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int ch = params[0];
int ah = params[1];
int aw = params[2];
int slice_a = ah * aw;
int pitch_a = aw;
float thre = 0.05;
if (ax < aw&& ay < ah)
{
float fa = cmap[ay*pitch_a + ax];
if (fa < thre)
fa = 0.0f;
else fa = weight;
for (int i = 0; i < ch; i++)
{
newd[i*slice_a + ay*pitch_a + ax] = oldd[i*slice_a + ay*pitch_a + ax]* fa + newd[i*slice_a + ay*pitch_a + ax] * (1.0-fa);
}
}
}
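// Note on blend (summary of the kernel above): wherever the confidence map value is at
// least the 0.05 threshold, the output keeps `weight` of the old data and (1 - weight)
// of the new data; below the threshold the new data is left unchanged.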
// ********** VOTE ***********
extern "C"
__global__ void avg_vote(unsigned int * ann, float * pb, float * pc, int ch, int ah, int aw, int bh, int bw, int patch_w) {
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int slice_a = ah * aw;
int pitch_a = aw;
int slice_b = bh * bw;
int pitch_b = bw;
int count = 0;
if (ax < aw&&ay < ah)
{
//set zero for all the channels at (ax,ay)
for (int i = 0; i < ch; i++)
{
pc[i*slice_a + ay*pitch_a + ax] = 0;
}
//count the sum of all the possible value of (ax,ay)
for (int dx = -patch_w / 2; dx <= patch_w / 2; dx++) {
for (int dy = -patch_w / 2; dy <= patch_w / 2; dy++)
{
if ((ax + dx) < aw && (ax + dx) >= 0 && (ay + dy) < ah && (ay + dy) >= 0)
{
unsigned int vp = ann[(ay + dy)*aw + ax + dx];
int xp = INT_TO_X(vp);
int yp = INT_TO_Y(vp);
if ((xp - dx) < bw && (xp - dx) >= 0 && (yp - dy) < bh && (yp - dy) >= 0)
{
count++;
for (int dc = 0; dc < ch; dc++)
{
pc[dc*slice_a + ay*pitch_a + ax] += pb[dc*slice_b + (yp - dy)*pitch_b + xp - dx];
}
}
}
}
}
//count average value
for (int i = 0; i < ch; i++)
{
pc[i*slice_a + ay*pitch_a + ax] /= count;
}
}
}
|
4dddd8d00e99012ac42bf928a01aa4906e2b7b3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CRtree.h"
__global__ void OddEvenSortPhaseKernel(float* d_arrayIn, int n, bool phase)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int subsetSize = 4;
int indexBegin;
	if(phase) // EVEN PHASE
indexBegin = id * (subsetSize*2);
	else // ODD PHASE
indexBegin = id * ((subsetSize*2)-subsetSize);
if(indexBegin < n)
{
int indexEnd = indexBegin + (subsetSize*2);
int i, temp;
for(i = indexBegin; i < (indexEnd-1); i++)
{
if( i+1 < n &&
d_arrayIn[i] > d_arrayIn[i+1] )
{
temp = d_arrayIn[i];
d_arrayIn[i] = d_arrayIn[i+1];
d_arrayIn[i+1] = temp;
}
}
}
}
void OddEvenSortPhase(float* arrayIn, int n, bool phase)
{
int size = n * sizeof(float);
float *d_arrayIn;
hipMalloc((void **) &d_arrayIn, size);
hipMemcpy(d_arrayIn, arrayIn, size, hipMemcpyHostToDevice);
dim3 DimGrid(ceil(n/256.0), 1, 1);
dim3 DimBlock(256, 1, 1);
hipLaunchKernelGGL(( OddEvenSortPhaseKernel), dim3(DimGrid), dim3(DimBlock) , 0, 0, d_arrayIn, n, phase);
hipMemcpy(arrayIn, d_arrayIn, size, hipMemcpyDeviceToHost);
hipFree(d_arrayIn);
}
int main()
{
printf("Begin ==========\n");
int numEntries = 4;
int numFeatures = 64;
CRtree rtree(numEntries, numFeatures);
	// INSERTING DATA ==================================================
float* bufferIn;
bufferIn = (float*) malloc(numFeatures * sizeof(float));
for(int i = 0; i < numFeatures; i++)
bufferIn[i] = 0.0;
//printf("root: %d\n", rtree.m_root->m_typeNode);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
//printf("root: %d\n", rtree.m_root->m_typeNode);
printf("End ============\n");
return 0;
}
| 4dddd8d00e99012ac42bf928a01aa4906e2b7b3a.cu | #include "CRtree.h"
__global__ void OddEvenSortPhaseKernel(float* d_arrayIn, int n, bool phase)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int subsetSize = 4;
int indexBegin;
	if(phase) // EVEN PHASE
indexBegin = id * (subsetSize*2);
	else // ODD PHASE
indexBegin = id * ((subsetSize*2)-subsetSize);
if(indexBegin < n)
{
int indexEnd = indexBegin + (subsetSize*2);
int i, temp;
for(i = indexBegin; i < (indexEnd-1); i++)
{
if( i+1 < n &&
d_arrayIn[i] > d_arrayIn[i+1] )
{
temp = d_arrayIn[i];
d_arrayIn[i] = d_arrayIn[i+1];
d_arrayIn[i+1] = temp;
}
}
}
}
void OddEvenSortPhase(float* arrayIn, int n, bool phase)
{
int size = n * sizeof(float);
float *d_arrayIn;
cudaMalloc((void **) &d_arrayIn, size);
cudaMemcpy(d_arrayIn, arrayIn, size, cudaMemcpyHostToDevice);
dim3 DimGrid(ceil(n/256.0), 1, 1);
dim3 DimBlock(256, 1, 1);
OddEvenSortPhaseKernel<<< DimGrid, DimBlock >>>(d_arrayIn, n, phase);
cudaMemcpy(arrayIn, d_arrayIn, size, cudaMemcpyDeviceToHost);
cudaFree(d_arrayIn);
}
int main()
{
printf("Begin ==========\n");
int numEntries = 4;
int numFeatures = 64;
CRtree rtree(numEntries, numFeatures);
	// INSERTING DATA ==================================================
float* bufferIn;
bufferIn = (float*) malloc(numFeatures * sizeof(float));
for(int i = 0; i < numFeatures; i++)
bufferIn[i] = 0.0;
//printf("root: %d\n", rtree.m_root->m_typeNode);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
rtree.InsertData(bufferIn);
//printf("root: %d\n", rtree.m_root->m_typeNode);
printf("End ============\n");
return 0;
}
|
67950a5fbddb91f54eafc26ee8b13761c6440f13.hip | // !!! This is a file automatically generated by hipify!!!
//
// A test program for gpu_trace.h library
//
// Compile with:
//
// Trace mode:
//
// nvcc test.cu -o test -D__ENABLE_TRACE__ -arch compute_13
//
// Normal mode:
//
// nvcc test.cu -o test
//
// Please note that -arch compute_13 is used because of __trace() ing double type data
// If you only use float ones, you may remove it
//
// Tested on Ubuntu 8.10 x86-64, CUDA 2.1
//
#include <stdio.h>
#include "hip/hip_runtime_api.h"
//
// Change some defaults
//
// Minimum thread index to trace
#define __TRACE_MIN_THREAD__ 1
//
// Maximum thread index to trace
#define __TRACE_MAX_THREAD__ 2
//
// Size of msg field
#define __TRACE_MSG_SIZE__ 16
#include "gpu_trace.h"
__global__ void test __traceable__ (int dummy)
{
int x = threadIdx.x;
__trace("Test", "int", x);
__trace("Test", "unsigned int", static_cast <unsigned int> (x));
__trace("Test", "long int", static_cast <long int> (x));
__trace("Test", "unsigned long int", static_cast <unsigned long int> (x));
__trace("Test", "float", static_cast <float> (x));
__trace("Test", "double", static_cast <double> (x));
for (int i = 0; i < x; i++)
__trace_exp("Loop", 3 + 2 * i);
}
int main()
{
INITIALIZE_TRACE_DATA();
hipLaunchKernelGGL(( test) , dim3(10), dim3(10), 0, 0, __traceable_call__ 0);
hipError_t ErrorCode = hipGetLastError();
if (ErrorCode != hipSuccess)
printf("*** Kernel did not launch, %s ***\n", hipGetErrorString(ErrorCode));
ErrorCode = hipDeviceSynchronize();
if (ErrorCode != hipSuccess)
printf("*** Kernel exited while executing, %s ***\n", hipGetErrorString(ErrorCode));
FINALIZE_TRACE_DATA();
PRINT_TRACE_DATA(stdout);
return 0;
}
| 67950a5fbddb91f54eafc26ee8b13761c6440f13.cu | //
// A test program for gpu_trace.h library
//
// Compile with:
//
// Trace mode:
//
// nvcc test.cu -o test -D__ENABLE_TRACE__ -arch compute_13
//
// Normal mode:
//
// nvcc test.cu -o test
//
// Please note that -arch compute_13 is used because of __trace() ing double type data
// If you only use float ones, you may remove it
//
// Tested on Ubuntu 8.10 x86-64, CUDA 2.1
//
#include <stdio.h>
#include "cuda_runtime_api.h"
//
// Change some defaults
//
// Minimum thread index to trace
#define __TRACE_MIN_THREAD__ 1
//
// Maximum thread index to trace
#define __TRACE_MAX_THREAD__ 2
//
// Size of msg field
#define __TRACE_MSG_SIZE__ 16
#include "gpu_trace.h"
__global__ void test __traceable__ (int dummy)
{
int x = threadIdx.x;
__trace("Test", "int", x);
__trace("Test", "unsigned int", static_cast <unsigned int> (x));
__trace("Test", "long int", static_cast <long int> (x));
__trace("Test", "unsigned long int", static_cast <unsigned long int> (x));
__trace("Test", "float", static_cast <float> (x));
__trace("Test", "double", static_cast <double> (x));
for (int i = 0; i < x; i++)
__trace_exp("Loop", 3 + 2 * i);
}
int main()
{
INITIALIZE_TRACE_DATA();
test <<<10, 10>>> __traceable_call__ (0);
cudaError_t ErrorCode = cudaGetLastError();
if (ErrorCode != cudaSuccess)
printf("*** Kernel did not launch, %s ***\n", cudaGetErrorString(ErrorCode));
ErrorCode = cudaThreadSynchronize();
if (ErrorCode != cudaSuccess)
printf("*** Kernel exited while executing, %s ***\n", cudaGetErrorString(ErrorCode));
FINALIZE_TRACE_DATA();
PRINT_TRACE_DATA(stdout);
return 0;
}
|
c24eaf454603dcdba44781a2bc680c328348df47.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuda_copyRegion.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
unsigned char *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
int stepDst = 1;
int stepSrc = 1;
int dst_width = XSIZE;
int dst_height = YSIZE;
int src_width = XSIZE;
int src_height = YSIZE;
int dst_xoffset = 1;
int dst_yoffset = 1;
int dst_widthToCrop = XSIZE;
int dst_heightToCrop = YSIZE;
int src_xoffset = 1;
int src_yoffset = 1;
int src_widthToCrop = XSIZE;
int src_heightToCrop = YSIZE;
int numChannel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cuda_copyRegion), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,src,stepDst,stepSrc,dst_width,dst_height,src_width,src_height,dst_xoffset,dst_yoffset,dst_widthToCrop,dst_heightToCrop,src_xoffset,src_yoffset,src_widthToCrop,src_heightToCrop,numChannel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cuda_copyRegion), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,src,stepDst,stepSrc,dst_width,dst_height,src_width,src_height,dst_xoffset,dst_yoffset,dst_widthToCrop,dst_heightToCrop,src_xoffset,src_yoffset,src_widthToCrop,src_heightToCrop,numChannel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cuda_copyRegion), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,src,stepDst,stepSrc,dst_width,dst_height,src_width,src_height,dst_xoffset,dst_yoffset,dst_widthToCrop,dst_heightToCrop,src_xoffset,src_yoffset,src_widthToCrop,src_heightToCrop,numChannel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c24eaf454603dcdba44781a2bc680c328348df47.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuda_copyRegion.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
unsigned char *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
int stepDst = 1;
int stepSrc = 1;
int dst_width = XSIZE;
int dst_height = YSIZE;
int src_width = XSIZE;
int src_height = YSIZE;
int dst_xoffset = 1;
int dst_yoffset = 1;
int dst_widthToCrop = XSIZE;
int dst_heightToCrop = YSIZE;
int src_xoffset = 1;
int src_yoffset = 1;
int src_widthToCrop = XSIZE;
int src_heightToCrop = YSIZE;
int numChannel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuda_copyRegion<<<gridBlock,threadBlock>>>(dst,src,stepDst,stepSrc,dst_width,dst_height,src_width,src_height,dst_xoffset,dst_yoffset,dst_widthToCrop,dst_heightToCrop,src_xoffset,src_yoffset,src_widthToCrop,src_heightToCrop,numChannel);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuda_copyRegion<<<gridBlock,threadBlock>>>(dst,src,stepDst,stepSrc,dst_width,dst_height,src_width,src_height,dst_xoffset,dst_yoffset,dst_widthToCrop,dst_heightToCrop,src_xoffset,src_yoffset,src_widthToCrop,src_heightToCrop,numChannel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuda_copyRegion<<<gridBlock,threadBlock>>>(dst,src,stepDst,stepSrc,dst_width,dst_height,src_width,src_height,dst_xoffset,dst_yoffset,dst_widthToCrop,dst_heightToCrop,src_xoffset,src_yoffset,src_widthToCrop,src_heightToCrop,numChannel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e02e714e66dd72eb5db4d52190e21d91a360341a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// vecadd.cu
/// For CSU CS575 Spring 2011
/// Instructor: Wim Bohm
/// Based on code from the CUDA Programming Guide
/// Modified by Wim Bohm and David Newman
/// Created: 2011-02-03
/// Last Modified: 2011-03-03 DVN
///
/// Add two Vectors A and B in C on GPU using
/// a kernel defined according to vecAddKernel.h
/// Students must not modify this file. The GTA
/// will grade your submission using an unmodified
/// copy of this file.
///
// Includes
#include <stdio.h>
#include "pp_dynamic_access_offchip_memory_vecadd_repeat.h"
#include "high_resolution_power.h"
// Variables for host and device vectors.
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
int ValuesPerThread; // number of values per thread
// Utility Functions
void Cleanup(bool);
void checkCUDAError(const char *msg);
void call_gpu_function() {
dim3 dimGrid(GridWidth);
dim3 dimBlock(BlockWidth);
hipLaunchKernelGGL(( AddVectors), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, ValuesPerThread);
}
// Host code performs setup and calls the kernel.
int main(int argc, char** argv)
{
int N; //Vector size
// Parse arguments.
if(argc != 2){
printf("Usage: %s ValuesPerThread\n", argv[0]);
printf("ValuesPerThread is the number of values added by each thread.\n");
printf("Total vector size is 128 * 60 * this value.\n");
exit(0);
} else {
sscanf(argv[1], "%d", &ValuesPerThread);
}
// Determine the number of threads .
// N is the total number of values to be in a vector
N = ValuesPerThread * GridWidth * BlockWidth;
printf("Total vector size: %d\n", N);
// size_t is the total number of bytes for a vector.
size_t size = N * sizeof(float);
// Tell CUDA how big to make the grid and thread blocks.
// Since this is a vector addition problem,
// grid and thread block are both one-dimensional.
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) Cleanup(false);
h_B = (float*)malloc(size);
if (h_B == 0) Cleanup(false);
h_C = (float*)malloc(size);
if (h_C == 0) Cleanup(false);
// Allocate vectors in device memory.
hipError_t error;
error = hipMalloc((void**)&d_A, size);
if (error != hipSuccess) Cleanup(false);
error = hipMalloc((void**)&d_B, size);
if (error != hipSuccess) Cleanup(false);
error = hipMalloc((void**)&d_C, size);
if (error != hipSuccess) Cleanup(false);
// Initialize host vectors h_A and h_B
int i;
for(i=0; i<N; ++i){
h_A[i] = (float)i;
h_B[i] = (float)(N-i);
h_C[i] = (float)0.0;
}
// Copy host vectors h_A and h_B to device vectores d_A and d_B
error = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (error != hipSuccess) Cleanup(false);
error = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (error != hipSuccess) Cleanup(false);
error = hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);
if (error != hipSuccess) Cleanup(false);
// Warm up
// hipLaunchKernelGGL(( AddVectors), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, ValuesPerThread);
call_gpu_function();
error = hipGetLastError();
if (error != hipSuccess) Cleanup(false);
hipDeviceSynchronize();
// Invoke kernel
// hipLaunchKernelGGL(( AddVectors), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, ValuesPerThread);
call_gpu_function();
error = hipGetLastError();
if (error != hipSuccess) Cleanup(false);
// Compute elapsed time
hipDeviceSynchronize();
long long exec_time_nanoseconds = get_exec_time_in_nanoseconds(call_gpu_function);
high_resolution_power_profile(call_gpu_function);
double time = exec_time_nanoseconds/1e6; //in ms
// Compute floating point operations per second.
double nFlops = (double)N*(double)REPS ;
double nFlopsPerSec = 1e3*nFlops/time;
double nGFlopsPerSec = nFlopsPerSec*1e-9;
//double nGFlopsPerSec = (1e3*N/(exec_time_nanoseconds/1e9))*1e-9;
//double nGFlopsPerSec = 1e3*N/exec_time_nanoseconds;
// Compute transfer rates.
double nBytes = 3*4*(double)N*(double)REPS; // 2N words in, 1N word out
double nBytesPerSec = 1e3*nBytes/time;
double nGBytesPerSec = nBytesPerSec*1e-9;
//double nGBytesPerSec = (1e3*nBytes/(exec_time_nanoseconds/1e9))*1e-9;
//double nGBytesPerSec = 1e3*nBytes/exec_time_nanoseconds;
// Report timing data.
printf( "GridWidth: %d BlockWidth: %d ValuesPerThread: %d REPS: %d Time: %f (ms), GFlopsS: %f GBytesS: %f nytes: %f nBytesPerS: %f\n", GridWidth, BlockWidth, ValuesPerThread, REPS,
time, nGFlopsPerSec, nGBytesPerSec, nBytes, nBytesPerSec);
// Copy result from device memory to host memory
error = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (error != hipSuccess) Cleanup(false);
// Verify & report result
for (i = 0; i < N; ++i) {
float val = h_C[i];
if (fabs(val - N) > 1e-5)
break;
}
printf("Test %s \n", (i == N) ? "PASSED" : "FAILED");
// Clean up and exit.
Cleanup(true);
}
void Cleanup(bool noError) { // simplified version from CUDA SDK
hipError_t error;
// Free device vectors
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
error = hipDeviceReset();
if (!noError || error != hipSuccess)
printf("cuda malloc or cuda thread exit failed \n");
fflush( stdout);
fflush( stderr);
exit(0);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err) );
exit(-1);
}
}
| e02e714e66dd72eb5db4d52190e21d91a360341a.cu | ///
/// vecadd.cu
/// For CSU CS575 Spring 2011
/// Instructor: Wim Bohm
/// Based on code from the CUDA Programming Guide
/// Modified by Wim Bohm and David Newman
/// Created: 2011-02-03
/// Last Modified: 2011-03-03 DVN
///
/// Add two Vectors A and B in C on GPU using
/// a kernel defined according to vecAddKernel.h
/// Students must not modify this file. The GTA
/// will grade your submission using an unmodified
/// copy of this file.
///
// Includes
#include <stdio.h>
#include "pp_dynamic_access_offchip_memory_vecadd_repeat.h"
#include "high_resolution_power.h"
// Variables for host and device vectors.
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
int ValuesPerThread; // number of values per thread
// Utility Functions
void Cleanup(bool);
void checkCUDAError(const char *msg);
void call_gpu_function() {
dim3 dimGrid(GridWidth);
dim3 dimBlock(BlockWidth);
AddVectors<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, ValuesPerThread);
}
// Host code performs setup and calls the kernel.
int main(int argc, char** argv)
{
int N; //Vector size
// Parse arguments.
if(argc != 2){
printf("Usage: %s ValuesPerThread\n", argv[0]);
printf("ValuesPerThread is the number of values added by each thread.\n");
printf("Total vector size is 128 * 60 * this value.\n");
exit(0);
} else {
sscanf(argv[1], "%d", &ValuesPerThread);
}
// Determine the number of threads .
// N is the total number of values to be in a vector
N = ValuesPerThread * GridWidth * BlockWidth;
printf("Total vector size: %d\n", N);
// size_t is the total number of bytes for a vector.
size_t size = N * sizeof(float);
// Tell CUDA how big to make the grid and thread blocks.
// Since this is a vector addition problem,
// grid and thread block are both one-dimensional.
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) Cleanup(false);
h_B = (float*)malloc(size);
if (h_B == 0) Cleanup(false);
h_C = (float*)malloc(size);
if (h_C == 0) Cleanup(false);
// Allocate vectors in device memory.
cudaError_t error;
error = cudaMalloc((void**)&d_A, size);
if (error != cudaSuccess) Cleanup(false);
error = cudaMalloc((void**)&d_B, size);
if (error != cudaSuccess) Cleanup(false);
error = cudaMalloc((void**)&d_C, size);
if (error != cudaSuccess) Cleanup(false);
// Initialize host vectors h_A and h_B
int i;
for(i=0; i<N; ++i){
h_A[i] = (float)i;
h_B[i] = (float)(N-i);
h_C[i] = (float)0.0;
}
// Copy host vectors h_A and h_B to device vectores d_A and d_B
error = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) Cleanup(false);
error = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) Cleanup(false);
error = cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) Cleanup(false);
// Warm up
// AddVectors<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, ValuesPerThread);
call_gpu_function();
error = cudaGetLastError();
if (error != cudaSuccess) Cleanup(false);
cudaThreadSynchronize();
// Invoke kernel
// AddVectors<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, ValuesPerThread);
call_gpu_function();
error = cudaGetLastError();
if (error != cudaSuccess) Cleanup(false);
// Compute elapsed time
cudaThreadSynchronize();
long long exec_time_nanoseconds = get_exec_time_in_nanoseconds(call_gpu_function);
high_resolution_power_profile(call_gpu_function);
double time = exec_time_nanoseconds/1e6; //in ms
// Compute floating point operations per second.
double nFlops = (double)N*(double)REPS ;
double nFlopsPerSec = 1e3*nFlops/time;
double nGFlopsPerSec = nFlopsPerSec*1e-9;
//double nGFlopsPerSec = (1e3*N/(exec_time_nanoseconds/1e9))*1e-9;
//double nGFlopsPerSec = 1e3*N/exec_time_nanoseconds;
// Compute transfer rates.
double nBytes = 3*4*(double)N*(double)REPS; // 2N words in, 1N word out
double nBytesPerSec = 1e3*nBytes/time;
double nGBytesPerSec = nBytesPerSec*1e-9;
//double nGBytesPerSec = (1e3*nBytes/(exec_time_nanoseconds/1e9))*1e-9;
//double nGBytesPerSec = 1e3*nBytes/exec_time_nanoseconds;
// Report timing data.
printf( "GridWidth: %d BlockWidth: %d ValuesPerThread: %d REPS: %d Time: %f (ms), GFlopsS: %f GBytesS: %f nytes: %f nBytesPerS: %f\n", GridWidth, BlockWidth, ValuesPerThread, REPS,
time, nGFlopsPerSec, nGBytesPerSec, nBytes, nBytesPerSec);
// Copy result from device memory to host memory
error = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) Cleanup(false);
// Verify & report result
for (i = 0; i < N; ++i) {
float val = h_C[i];
if (fabs(val - N) > 1e-5)
break;
}
printf("Test %s \n", (i == N) ? "PASSED" : "FAILED");
// Clean up and exit.
Cleanup(true);
}
void Cleanup(bool noError) { // simplified version from CUDA SDK
cudaError_t error;
// Free device vectors
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
error = cudaThreadExit();
if (!noError || error != cudaSuccess)
printf("cuda malloc or cuda thread exit failed \n");
fflush( stdout);
fflush( stderr);
exit(0);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err) );
exit(-1);
}
}
|
182c9f4491d8575c2bbd62bd4c531a4d4a7968e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 There are two 100x100 arrays: the first array is filled with zeros, the second
 array is filled with ones. Write an algorithm using CUDA that builds a third
 array whose elements are taken from the first and second arrays in a
 checkerboard pattern. Print the central 20x20 elements of the third array.
*/
#include <stdio.h>
#define M 10
#define N 100
__global__ void add (int *arr1, int *arr2, int *arr3) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < N && y < N) {
if ((x % 2 == 0 && y % 2 == 0) || (x % 2 != 0 && y % 2 != 0))
arr3[y*N + x] = arr2[y*N + x];
if ((x % 2 == 0 && y % 2 != 0) || (x % 2 != 0 && y % 2 == 0))
arr3[y*N + x] = arr1[y*N + x];
}
}
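// Summary of the conditions above: cells where x and y have the same parity (x + y even)
// take their value from arr2 (the ones), while cells with mixed parity take their value
// from arr1 (the zeros), producing the checkerboard pattern.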
void printError(const char *msg, const hipError_t &error)
{
printf("%s: ", msg);
if (error == hipSuccess) {
printf("ok");
} else {
printf("%s", hipGetErrorString(error));
}
printf("\n");
}
int main() {
int *arr1 = new int[N*N];
int *arr2 = new int[N*N];
for (int i = 0; i < N*N; ++i) {
arr1[i] = 0;
arr2[i] = 1;
}
int *dev_arr1;
int *dev_arr2;
int *dev_arr3;
	uint memory_size = (N*N)*sizeof(int);
printError("selecting device", hipSetDevice(0));
printError("malloc for A", hipMalloc((void**)&dev_arr1, memory_size));
printError("malloc for B", hipMalloc((void**)&dev_arr2, memory_size));
printError("malloc for C", hipMalloc((void**)&dev_arr3, memory_size));
printError("memcpy A", hipMemcpy(dev_arr1, arr1, memory_size, hipMemcpyHostToDevice));
printError("memcpy B", hipMemcpy(dev_arr2, arr2, memory_size, hipMemcpyHostToDevice));
dim3 threadsPerBlock(M, M);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_arr1, dev_arr2, dev_arr3);
printError("thread sycnrhonize", hipDeviceSynchronize());
printError("memcpy C back", hipMemcpy(arr1, dev_arr3, memory_size, hipMemcpyDeviceToHost));
for (int i = 40; i < 60; ++i) {
for (int j = 40; j < 60; ++j) {
printf("%d ",arr1[i*N + j]);
}
printf("\n");
}
printError("free A", hipFree(dev_arr1));
printError("free B", hipFree(dev_arr2));
printError("free C", hipFree(dev_arr3));
return 0;
}
| 182c9f4491d8575c2bbd62bd4c531a4d4a7968e7.cu | /*
 There are two 100x100 arrays: the first array is filled with zeros, the second
 array is filled with ones. Write an algorithm using CUDA that builds a third
 array whose elements are taken from the first and second arrays in a
 checkerboard pattern. Print the central 20x20 elements of the third array.
*/
#include <stdio.h>
#define M 10
#define N 100
__global__ void add (int *arr1, int *arr2, int *arr3) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < N && y < N) {
if ((x % 2 == 0 && y % 2 == 0) || (x % 2 != 0 && y % 2 != 0))
arr3[y*N + x] = arr2[y*N + x];
if ((x % 2 == 0 && y % 2 != 0) || (x % 2 != 0 && y % 2 == 0))
arr3[y*N + x] = arr1[y*N + x];
}
}
void printError(const char *msg, const cudaError_t &error)
{
printf("%s: ", msg);
if (error == cudaSuccess) {
printf("ok");
} else {
printf("%s", cudaGetErrorString(error));
}
printf("\n");
}
int main() {
int *arr1 = new int[N*N];
int *arr2 = new int[N*N];
for (int i = 0; i < N*N; ++i) {
arr1[i] = 0;
arr2[i] = 1;
}
int *dev_arr1;
int *dev_arr2;
int *dev_arr3;
uint memory_size = (N*N)*sizeof(int);
printError("selecting device", cudaSetDevice(0));
printError("malloc for A", cudaMalloc((void**)&dev_arr1, memory_size ));
printError("malloc for B", cudaMalloc((void**)&dev_arr2, memory_size ));
printError("malloc for C", cudaMalloc((void**)&dev_arr3, memory_size ));
printError("memcpy A", cudaMemcpy(dev_arr1, arr1, memory_size , cudaMemcpyHostToDevice));
printError("memcpy B", cudaMemcpy(dev_arr2, arr2, memory_size , cudaMemcpyHostToDevice));
dim3 threadsPerBlock(M, M);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
add<<<numBlocks, threadsPerBlock>>>(dev_arr1, dev_arr2, dev_arr3);
printError("thread sycnrhonize", cudaThreadSynchronize());
printError("memcpy C back", cudaMemcpy(arr1, dev_arr3, memory_size , cudaMemcpyDeviceToHost));
for (int i = 40; i < 60; ++i) {
for (int j = 40; j < 60; ++j) {
printf("%d ",arr1[i*N + j]);
}
printf("\n");
}
printError("free A", cudaFree(dev_arr1));
printError("free B", cudaFree(dev_arr2));
printError("free C", cudaFree(dev_arr3));
return 0;
}
|
ffca003fb0ced53d54c68fe52aa8648cb2089f79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"func_sha1.cu"
__global__ void sha1(char *text, char *output, uint32_t length,
long long int N, char *valid_hash, int hashlen){
int z = blockIdx.x*blockDim.x + threadIdx.x;
char *buf;
int i, j;
SHA1Context sha;
char he[] = "0123456789ABCDEF";
buf = (char *)malloc((hashlen+1)*sizeof(char));
	/* this check is needed so that threads whose indices fall outside
	   the bounds of the key file do not do any work */
if (z < N){
	/* here we compute the pointers to the password we need and compute its hash */
char *tmp_text = text + z*(length+1)*sizeof(char);
char *tmp_output = output + z*sizeof(char);
SHA1Reset(&sha);
SHA1Input(&sha, tmp_text, length);
	/* copy the hash from the structure into the string buffer */
if (!SHA1Result(&sha))
{
tmp_output[0] = '2';
}
else
{
for(i=0; i<5; i++){
for(j=0; j<8; j++){
buf[i*8+7-j] = he[(sha.Message_Digest[i]&(0xf<<4*j))>>(4*j)];
}
}
buf[40] = '\0';
}
	/* compare the computed hash with the target hash */
int flag = 1;
for (i=0; i<hashlen; i++){
if (buf[i] != valid_hash[i]){
flag = 0;
}
}
tmp_output[0] = '0'+flag;
}
free(buf);
} | ffca003fb0ced53d54c68fe52aa8648cb2089f79.cu | #include"func_sha1.cu"
__global__ void sha1(char *text, char *output, uint32_t length,
long long int N, char *valid_hash, int hashlen){
int z = blockIdx.x*blockDim.x + threadIdx.x;
char *buf;
int i, j;
SHA1Context sha;
char he[] = "0123456789ABCDEF";
buf = (char *)malloc((hashlen+1)*sizeof(char));
	/* this check is needed so that threads whose indices fall outside
	   the bounds of the key file do not do any work */
if (z < N){
	/* here we compute the pointers to the password we need and compute its hash */
char *tmp_text = text + z*(length+1)*sizeof(char);
char *tmp_output = output + z*sizeof(char);
SHA1Reset(&sha);
SHA1Input(&sha, tmp_text, length);
	/* copy the hash from the structure into the string buffer */
if (!SHA1Result(&sha))
{
tmp_output[0] = '2';
}
else
{
for(i=0; i<5; i++){
for(j=0; j<8; j++){
buf[i*8+7-j] = he[(sha.Message_Digest[i]&(0xf<<4*j))>>(4*j)];
}
}
buf[40] = '\0';
}
	/* compare the computed hash with the target hash */
int flag = 1;
for (i=0; i<hashlen; i++){
if (buf[i] != valid_hash[i]){
flag = 0;
}
}
tmp_output[0] = '0'+flag;
}
free(buf);
} |
9dd83cc9c76b64427a6151559d3eac38b09988ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <cfloat>
// TODO(jamesreed): I would use <cmath> here but std::isnan
// and std::isinf are declared constexpr there and the nvidia
// compiler throws an error because of it
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/unique.h>
#include "caffe2/core/context_gpu.h"
#include "utility_ops.h"
namespace caffe2 {
CAFFE_KNOWN_TYPE(const float*);
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
bool has_nan = false;
CUDA_1D_KERNEL_LOOP(i, N) {
// Note: we have no need to do early return, since only if this fails
// will we not need to inspect all elements. No need to optimize the
// case that will fail.
has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
}
__syncthreads();
if (has_nan) {
result[0] = true;
}
}
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
const size_t N = X.size();
const float* data_ptr = X.data<float>();
scratch_.Resize(1);
math::Set<bool, CUDAContext>(
1, false, scratch_.mutable_data<bool>(), &context_);
hipLaunchKernelGGL(( NanCheckKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, X.data<float>(), scratch_.mutable_data<bool>());
bool result = false;
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CUDA_ENFORCE(hipMemcpyAsync(
&result,
scratch_.raw_data(),
1,
hipMemcpyDefault,
context_.cuda_stream()));
}
// Note: we must synchronize here so we can inspect the result
context_.FinishDeviceComputation();
// Print out diagnostic info if we have a NaN or inf
if (result) {
std::cerr << "Tensor contained NaN or inf: " << this->def().input(0)
<< std::endl;
for (int j = 0; j < InputSize(); j++) {
TensorCPU cpu_X;
cpu_X.ResizeLike(Input(j));
      // Hack to cause allocation to happen here, so it won't happen
// when we do CopyFrom. We need the mutex then because host->gpu
// copies seem to possibly lock with NCCL.
cpu_X.mutable_data<float>();
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
cpu_X.CopyFrom(Input(j), &context_);
}
context_.FinishDeviceComputation();
std::cerr << "Input tensor: " << j << ": [" << def().input(j) << "]"
<< std::endl;
tensorPrinter_.Print<float>(cpu_X);
if (j == 0) {
std::cerr << "NaN idxs:" << std::endl;
auto* cpu_X_data = cpu_X.data<float>();
for (size_t i = 0; i < cpu_X.size(); ++i) {
if (isnan(cpu_X_data[i]) || isinf(cpu_X_data[i])) {
std::cerr << i << " ";
}
}
}
std::cerr << std::endl;
}
return false;
}
// This op should act as an identity matrix if we don't find any NaNs/infs.
// Copy over the data if we are not doing this in-place.
if (&X != Y) {
Y->CopyFrom(X, &context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
__global__ void
ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) {
CUDA_1D_KERNEL_LOOP(i, N) {
maxout[i] = max(X[i], Y[i]);
}
}
template <>
bool MaxOp<float, CUDAContext>::Compute() {
float* output_data = Output(0)->mutable_data<float>();
const int N = Input(0).size();
// Run pairwise-maxes
for (int i = 1; i < InputSize(); ++i) {
hipLaunchKernelGGL(( ElwiseMaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
(i == 0 ? Input(0).data<float>() : Output(0)->data<float>()),
Input(i).data<float>(),
output_data,
N);
}
return true;
}
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
template<typename T_INDEX>
__global__ void
GatherKernel(const float* X, float* Y, const T_INDEX* indices, const int N, const int block_size) {
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = indices[i];
const float* src_offset = X + idx * block_size;
float* dst_offset = Y + i * block_size;
for (int j = threadIdx.x; j < block_size; j += blockDim.x) {
dst_offset[j] = src_offset[j];
}
}
}
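// In effect, block i copies row indices[i] of X into row i of Y, with the threads of the
// block striding over the block_size elements of that row.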
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t,int64_t>>::call(
this, OperatorBase::Input<TensorCUDA>(INDICES));
}
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto* output = Output(0);
CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
auto shape = indices.dims();
shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());
output->Resize(shape);
int block_size = data.size() / data.dim(0);
auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();
CAFFE_ENFORCE(
block_bytesize == data.nbytes() / data.dim(0),
"block_bytesize should be consistent with data dim");
int N = indices.size();
auto src_base = static_cast<const float*>(data.raw_data());
const Index* idxs = indices.template data<Index>();
auto out = static_cast<float*>(output->raw_mutable_data(data.meta()));
hipLaunchKernelGGL(( GatherKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
src_base, out, idxs, N, block_size
);
return true;
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
/**
* @brief Update slices of Y in-place with a batch of weighted X's.
* Y[idx] = alpha[b] * X[b][i] + Y[idx]
* i=0,...,N-1
* b=0,...,B-1
* idx=Indices[i]
*/
template <typename T_INDEX>
__global__ void AxpySliceKernel(
const float* weight0,
const TIndex N,
const TIndex B,
const TIndex slice_size,
const float** alpha,
const float** X,
const T_INDEX* Indices,
float* Y,
const TIndex M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int b = 0; b < B; b++) {
float a = *alpha[b];
const float* x_offset = X[b] + (i * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], a * x_offset[j]);
}
}
}
}
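// Illustrative example (values chosen for this sketch, assuming weight0[0] == 1.0 as
// required by the assert above): with B = 1, slice_size = 2, Indices = {3},
// *alpha[0] = 0.5 and X[0] = {2, 4}, the kernel atomically adds 0.5 * 2 = 1.0 to Y[6]
// and 0.5 * 4 = 2.0 to Y[7].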
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
template <>
template <typename Index>
bool ScatterWeightedSumOp<float,CUDAContext>::DoRunWithType() {
DCHECK_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
DCHECK_GT(X0.size(), 0);
DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector";
DCHECK_EQ(weight0.size(), 1);
TIndex M = X0.size();
TIndex N = X0.dim(0);
TIndex K = indices.size();
TIndex block_size = M / N;
T* data = output->template mutable_data<T>();
// In order to have all device pointers of x_i (and weight_i similarly)
// consecutively in device memory, copy pointers to a host vector and then
// copy back into a device array.
const TIndex B = (InputSize() - 3) / 2;
x_data_host_.Resize(B);
weights_host_.Resize(B);
x_data_device_.Resize(B);
weights_device_.Resize(B);
const float** x_data_host = x_data_host_.mutable_data<const float*>();
const float** weights_host = weights_host_.mutable_data<const float*>();
const float** x_data_device = x_data_device_.mutable_data<const float*>();
const float** weights_device = weights_device_.mutable_data<const float*>();
for (int inp = 3; inp < InputSize(); inp += 2) {
int idx = (inp - 3) / 2;
x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data());
weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data());
}
context_.Copy<const float*, CPUContext, CUDAContext>(
B, x_data_host, x_data_device);
context_.Copy<const float*, CPUContext, CUDAContext>(
B, weights_host, weights_device);
hipLaunchKernelGGL(( AxpySliceKernel),
dim3(std::min<TIndex>(K, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
weight0.template data<float>(),
K,
B,
block_size,
weights_device,
x_data_device,
indices.template data<Index>(),
data,
M);
return true;
}
REGISTER_CUDA_OPERATOR(
ScatterWeightedSum,
ScatterWeightedSumOp<float, CUDAContext>);
#if THRUST_VERSION >= 100800
__global__ void remap_kernel(
thrust::device_ptr<int> second_order,
thrust::device_ptr<int> order,
int* output,
int N,
int K) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= K)
return;
int idx = second_order[i];
output[order[idx]] = i;
// Maybe cuda 1D kernel?
for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {
output[order[idx]] = i;
}
return;
}
template <>
template <typename T>
void UniqueOp<CUDAContext>::DoRun() {
auto& inputTensor = Input(0);
// use dim32 to enforce that it's fine to have remapping of type int
int N = inputTensor.dim32(0);
CAFFE_ENFORCE_EQ(inputTensor.ndim(), 1, "Input should be a vector");
auto* uniqueTensor = Output(UNIQUE);
int* remapping = nullptr;
if (REMAPPING < OutputSize()) {
auto* remappingTensor = Output(REMAPPING);
remappingTensor->ResizeLike(inputTensor);
remapping = remappingTensor->template mutable_data<int>();
}
const T* input = inputTensor.template data<T>();
thrust_unique_buffer_.Resize(N);
auto* buffer = thrust_unique_buffer_.template mutable_data<T>();
context_.template CopyItems<CUDAContext, CUDAContext>(
inputTensor.meta(), N, input, buffer);
// Create two vector of {0, 1, ..., N-1} on CUDA device
thrust::device_vector<int> order1(N), order2(N);
thrust::sequence(
thrust::hip::par.on(context_.cuda_stream()),
order1.begin(),
order1.end());
thrust::sequence(
thrust::hip::par.on(context_.cuda_stream()),
order2.begin(),
order2.end());
// Sort the input along with order vector. So now we know where each element
// is permutated to. For example:
// input1 = 1,3,5,1,5,7,9
// order1 = 0,1,2,3,4,5,6
// Now we have:
// output = 1,1,3,5,5,7,9
// order1 = 0,3,1,2,4,5,6
thrust::sort_by_key(
thrust::hip::par.on(context_.cuda_stream()),
buffer,
buffer + N,
order1.begin());
// Use consequent unique op to get another order_buffer
// input2 = 1,1,3,5,5,7,9
// order2 = 0,1,2,3,4,5,6
// Now we have:
// output = 1,3,5,7,9
// order2 = 0,2,3,5,6
auto new_last = thrust::unique_by_key(
thrust::hip::par.on(context_.cuda_stream()),
buffer,
buffer + N,
order2.begin());
int K = new_last.first - buffer;
uniqueTensor->Resize(K);
T* unique = uniqueTensor->template mutable_data<T>();
context_.template CopyItems<CUDAContext, CUDAContext>(
thrust_unique_buffer_.meta(), K, buffer, unique);
// Compute the remapping. For example, for the number 1, if we look at
// order2[0] and order2[1], we know that input2[0:2) are all 1. They are all
// remapped to 0 in final input. And from order1, we know where they come
// from. The rest is easy.
if (remapping != nullptr) {
// record remap
hipLaunchKernelGGL(( remap_kernel),
dim3(CAFFE_GET_BLOCKS(K)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
order2.data(), order1.data(), remapping, N, K);
}
}
namespace {
REGISTER_CUDA_OPERATOR(Unique, UniqueOp<CUDAContext>);
} // namespace
#endif // THRUST_VERSION >= 100800
REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>);
} // namespace caffe2
| 9dd83cc9c76b64427a6151559d3eac38b09988ad.cu | #include <math.h>
#include <cfloat>
// TODO(jamesreed): I would use <cmath> here but std::isnan
// and std::isinf are declared constexpr there and the nvidia
// compiler throws an error because of it
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/unique.h>
#include "caffe2/core/context_gpu.h"
#include "utility_ops.h"
namespace caffe2 {
CAFFE_KNOWN_TYPE(const float*);
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
bool has_nan = false;
CUDA_1D_KERNEL_LOOP(i, N) {
// Note: we have no need to do early return, since only if this fails
// will we not need to inspect all elements. No need to optimize the
// case that will fail.
has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
}
__syncthreads();
if (has_nan) {
result[0] = true;
}
}
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
const size_t N = X.size();
const float* data_ptr = X.data<float>();
scratch_.Resize(1);
math::Set<bool, CUDAContext>(
1, false, scratch_.mutable_data<bool>(), &context_);
NanCheckKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, X.data<float>(), scratch_.mutable_data<bool>());
bool result = false;
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CUDA_ENFORCE(cudaMemcpyAsync(
&result,
scratch_.raw_data(),
1,
cudaMemcpyDefault,
context_.cuda_stream()));
}
// Note: we must synchronize here so we can inspect the result
context_.FinishDeviceComputation();
// Print out diagnostic info if we have a NaN or inf
if (result) {
std::cerr << "Tensor contained NaN or inf: " << this->def().input(0)
<< std::endl;
for (int j = 0; j < InputSize(); j++) {
TensorCPU cpu_X;
cpu_X.ResizeLike(Input(j));
      // Hack to cause allocation to happen here, so it won't happen
// when we do CopyFrom. We need the mutex then because host->gpu
// copies seem to possibly lock with NCCL.
cpu_X.mutable_data<float>();
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
cpu_X.CopyFrom(Input(j), &context_);
}
context_.FinishDeviceComputation();
std::cerr << "Input tensor: " << j << ": [" << def().input(j) << "]"
<< std::endl;
tensorPrinter_.Print<float>(cpu_X);
if (j == 0) {
std::cerr << "NaN idxs:" << std::endl;
auto* cpu_X_data = cpu_X.data<float>();
for (size_t i = 0; i < cpu_X.size(); ++i) {
if (isnan(cpu_X_data[i]) || isinf(cpu_X_data[i])) {
std::cerr << i << " ";
}
}
}
std::cerr << std::endl;
}
return false;
}
// This op should act as an identity matrix if we don't find any NaNs/infs.
// Copy over the data if we are not doing this in-place.
if (&X != Y) {
Y->CopyFrom(X, &context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
__global__ void
ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) {
CUDA_1D_KERNEL_LOOP(i, N) {
maxout[i] = max(X[i], Y[i]);
}
}
template <>
bool MaxOp<float, CUDAContext>::Compute() {
float* output_data = Output(0)->mutable_data<float>();
const int N = Input(0).size();
// Run pairwise-maxes
for (int i = 1; i < InputSize(); ++i) {
ElwiseMaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
(i == 0 ? Input(0).data<float>() : Output(0)->data<float>()),
Input(i).data<float>(),
output_data,
N);
}
return true;
}
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
template<typename T_INDEX>
__global__ void
GatherKernel(const float* X, float* Y, const T_INDEX* indices, const int N, const int block_size) {
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = indices[i];
const float* src_offset = X + idx * block_size;
float* dst_offset = Y + i * block_size;
for (int j = threadIdx.x; j < block_size; j += blockDim.x) {
dst_offset[j] = src_offset[j];
}
}
}
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t,int64_t>>::call(
this, OperatorBase::Input<TensorCUDA>(INDICES));
}
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto* output = Output(0);
CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
auto shape = indices.dims();
shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());
output->Resize(shape);
int block_size = data.size() / data.dim(0);
auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();
CAFFE_ENFORCE(
block_bytesize == data.nbytes() / data.dim(0),
"block_bytesize should be consistent with data dim");
int N = indices.size();
auto src_base = static_cast<const float*>(data.raw_data());
const Index* idxs = indices.template data<Index>();
auto out = static_cast<float*>(output->raw_mutable_data(data.meta()));
GatherKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
src_base, out, idxs, N, block_size
);
return true;
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
/**
* @brief Update slices of Y in-place with a batch of weighted X's.
* Y[idx] = alpha[b] * X[b][i] + Y[idx]
* i=0,...,N-1
* b=0,...,B-1
* idx=Indices[i]
*/
template <typename T_INDEX>
__global__ void AxpySliceKernel(
const float* weight0,
const TIndex N,
const TIndex B,
const TIndex slice_size,
const float** alpha,
const float** X,
const T_INDEX* Indices,
float* Y,
const TIndex M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int b = 0; b < B; b++) {
float a = *alpha[b];
const float* x_offset = X[b] + (i * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], a * x_offset[j]);
}
}
}
}
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
template <>
template <typename Index>
bool ScatterWeightedSumOp<float,CUDAContext>::DoRunWithType() {
DCHECK_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
DCHECK_GT(X0.size(), 0);
DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector";
DCHECK_EQ(weight0.size(), 1);
TIndex M = X0.size();
TIndex N = X0.dim(0);
TIndex K = indices.size();
TIndex block_size = M / N;
T* data = output->template mutable_data<T>();
// In order to have all device pointers of x_i (and weight_i similarly)
// consecutively in device memory, copy pointers to a host vector and then
// copy back into a device array.
const TIndex B = (InputSize() - 3) / 2;
x_data_host_.Resize(B);
weights_host_.Resize(B);
x_data_device_.Resize(B);
weights_device_.Resize(B);
const float** x_data_host = x_data_host_.mutable_data<const float*>();
const float** weights_host = weights_host_.mutable_data<const float*>();
const float** x_data_device = x_data_device_.mutable_data<const float*>();
const float** weights_device = weights_device_.mutable_data<const float*>();
for (int inp = 3; inp < InputSize(); inp += 2) {
int idx = (inp - 3) / 2;
x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data());
weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data());
}
context_.Copy<const float*, CPUContext, CUDAContext>(
B, x_data_host, x_data_device);
context_.Copy<const float*, CPUContext, CUDAContext>(
B, weights_host, weights_device);
AxpySliceKernel<<<
std::min<TIndex>(K, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
weight0.template data<float>(),
K,
B,
block_size,
weights_device,
x_data_device,
indices.template data<Index>(),
data,
M);
return true;
}
REGISTER_CUDA_OPERATOR(
ScatterWeightedSum,
ScatterWeightedSumOp<float, CUDAContext>);
#if THRUST_VERSION >= 100800
__global__ void remap_kernel(
thrust::device_ptr<int> second_order,
thrust::device_ptr<int> order,
int* output,
int N,
int K) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= K)
return;
int idx = second_order[i];
output[order[idx]] = i;
// Maybe cuda 1D kernel?
for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {
output[order[idx]] = i;
}
return;
}
template <>
template <typename T>
void UniqueOp<CUDAContext>::DoRun() {
auto& inputTensor = Input(0);
// use dim32 to enforce that it's fine to have remapping of type int
int N = inputTensor.dim32(0);
CAFFE_ENFORCE_EQ(inputTensor.ndim(), 1, "Input should be a vector");
auto* uniqueTensor = Output(UNIQUE);
int* remapping = nullptr;
if (REMAPPING < OutputSize()) {
auto* remappingTensor = Output(REMAPPING);
remappingTensor->ResizeLike(inputTensor);
remapping = remappingTensor->template mutable_data<int>();
}
const T* input = inputTensor.template data<T>();
thrust_unique_buffer_.Resize(N);
auto* buffer = thrust_unique_buffer_.template mutable_data<T>();
context_.template CopyItems<CUDAContext, CUDAContext>(
inputTensor.meta(), N, input, buffer);
// Create two vector of {0, 1, ..., N-1} on CUDA device
thrust::device_vector<int> order1(N), order2(N);
thrust::sequence(
thrust::cuda::par.on(context_.cuda_stream()),
order1.begin(),
order1.end());
thrust::sequence(
thrust::cuda::par.on(context_.cuda_stream()),
order2.begin(),
order2.end());
// Sort the input along with order vector. So now we know where each element
// is permutated to. For example:
// input1 = 1,3,5,1,5,7,9
// order1 = 0,1,2,3,4,5,6
// Now we have:
// output = 1,1,3,5,5,7,9
// order1 = 0,3,1,2,4,5,6
thrust::sort_by_key(
thrust::cuda::par.on(context_.cuda_stream()),
buffer,
buffer + N,
order1.begin());
// Use consequent unique op to get another order_buffer
// input2 = 1,1,3,5,5,7,9
// order2 = 0,1,2,3,4,5,6
// Now we have:
// output = 1,3,5,7,9
// order2 = 0,2,3,5,6
auto new_last = thrust::unique_by_key(
thrust::cuda::par.on(context_.cuda_stream()),
buffer,
buffer + N,
order2.begin());
int K = new_last.first - buffer;
uniqueTensor->Resize(K);
T* unique = uniqueTensor->template mutable_data<T>();
context_.template CopyItems<CUDAContext, CUDAContext>(
thrust_unique_buffer_.meta(), K, buffer, unique);
  // Compute the remapping. For example, for the number 1, if we look at
  // order2[0] and order2[1], we know that input2[0:2) are all 1. They are all
  // remapped to 0 in the final output. And from order1, we know which original
  // positions they came from. The remaining values follow the same pattern.
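  // Worked example: input = 1,3,5,1,5,7,9 sorts to buffer = 1,1,3,5,5,7,9 with
  // order1 = 0,3,1,2,4,5,6; unique_by_key leaves buffer = 1,3,5,7,9 with
  // order2 = 0,2,3,5,6, so remap_kernel below produces
  // remapping = 0,1,2,0,2,3,4.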
if (remapping != nullptr) {
// record remap
remap_kernel<<<
CAFFE_GET_BLOCKS(K),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
order2.data(), order1.data(), remapping, N, K);
}
}
namespace {
REGISTER_CUDA_OPERATOR(Unique, UniqueOp<CUDAContext>);
} // namespace
#endif // THRUST_VERSION >= 100800
REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>);
} // namespace caffe2
|
a89b9fe9afa22706b9df619971540d19882ab1e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <vector>
#include "caffe/neuron_layers.hpp"
namespace caffe {
template<typename Dtype>
__device__ inline Dtype tanhlike_gpu(Dtype x, float beta) {
return 2. / (1. + exp(-beta * x)) - 1.;
}
template <typename Dtype>
__global__ void TanhlikeForward(const int n, const Dtype* in, Dtype* out, float beta) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanhlike_gpu(in[index], beta);
}
}
template <typename Dtype>
void TanhlikeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
float tanh_beta = this->layer_param_.tanhlike_param().tanh_beta();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanhlikeForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, tanh_beta);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
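// Note for the backward pass below: tanhlike(x) = 2 / (1 + exp(-beta * x)) - 1
// equals tanh(beta * x / 2), so dy/dx = 0.5 * beta * (1 + y) * (1 - y) with
// y = tanhlike(x). TanhlikeBackward applies exactly this factor to the
// incoming diff, reusing the forward output instead of recomputing exp().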
template <typename Dtype>
__global__ void TanhlikeBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff, float beta) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype tanh_x = out_data[index];
out_diff[index] = 0.5 * beta * in_diff[index] * (1. + tanh_x) * (1. - tanh_x);
}
}
template <typename Dtype>
void TanhlikeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
float tanh_beta = this->layer_param_.tanhlike_param().tanh_beta();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanhlikeBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff, tanh_beta);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanhlikeLayer);
} // namespace caffe
| a89b9fe9afa22706b9df619971540d19882ab1e3.cu | #include <cmath>
#include <vector>
#include "caffe/neuron_layers.hpp"
namespace caffe {
template<typename Dtype>
__device__ inline Dtype tanhlike_gpu(Dtype x, float beta) {
return 2. / (1. + exp(-beta * x)) - 1.;
}
template <typename Dtype>
__global__ void TanhlikeForward(const int n, const Dtype* in, Dtype* out, float beta) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanhlike_gpu(in[index], beta);
}
}
template <typename Dtype>
void TanhlikeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
float tanh_beta = this->layer_param_.tanhlike_param().tanh_beta();
// NOLINT_NEXT_LINE(whitespace/operators)
TanhlikeForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, tanh_beta);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void TanhlikeBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff, float beta) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype tanh_x = out_data[index];
out_diff[index] = 0.5 * beta * in_diff[index] * (1. + tanh_x) * (1. - tanh_x);
}
}
template <typename Dtype>
void TanhlikeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
float tanh_beta = this->layer_param_.tanhlike_param().tanh_beta();
// NOLINT_NEXT_LINE(whitespace/operators)
TanhlikeBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff, tanh_beta);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanhlikeLayer);
} // namespace caffe
|
2a108ac32587963c6dcba5248e38d61d2512ef1b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/cudapoa_generate_consensus.cuh" //generateConsensusHost, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "sorted_graph.hpp" //SortedGraph
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicGenerateConsensus
{
public:
BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<int16_t> sorted_graph, Int16Vec2D node_alignments,
Int16Vec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w)
: graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges)
, outgoing_edge_w_(outgoing_edge_w)
, outgoing_edges_(outgoing_edges)
{
}
void get_graph_buffers(uint8_t* nodes, int16_t* node_count,
int16_t* sorted_poa, int16_t* node_id_to_pos,
int16_t* incoming_edges, uint16_t* incoming_edge_count,
int16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* node_coverage_counts,
int16_t* node_alignments, uint16_t* node_alignment_count) const
{
graph_.get_nodes(nodes, node_count);
graph_.get_sorted_graph(sorted_poa);
graph_.get_node_id_to_pos(node_id_to_pos);
graph_.get_node_coverage_counts(node_coverage_counts);
graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph_.get_node_alignments(node_alignments, node_alignment_count);
get_incoming_edge_w(incoming_edge_w);
}
void get_incoming_edge_w(uint16_t* incoming_edge_w) const
{
auto outgoing_edges = graph_.get_outgoing_edges();
for (int i = 0; i < get_size(outgoing_edges); i++)
{
for (int j = 0; j < get_size(outgoing_edges[i]); j++)
{
int16_t to_node = outgoing_edges[i][j];
incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j];
}
}
}
protected:
SortedGraph graph_;
Int16Vec2D outgoing_edges_;
Uint16Vec2D outgoing_edge_w_;
};
typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair;
// create a vector of test cases
std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases()
{
std::vector<GenerateConsensusTestPair> test_cases;
/*
     *              T
     *             / \
     * graph  A - A   A
     *             \ /
     *              A
*/
std::string ans_1 = "ATAA";
BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes
{0, 1, 2, 4, 3}, //sorted_graph
{{}, {}, {4}, {}, {2}}, //node_alignments
{{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges
{2, 2, 1, 2, 1}, //node_coverage_counts
{{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w
test_cases.emplace_back(std::move(ans_1), std::move(gc_1));
/*
     * graph  A - T - C - G - A
*/
std::string ans_2 = "AGCTA";
BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1}, {2}, {3}, {4}, {}}, //outgoing_edges
{1, 1, 1, 1, 1}, //node_coverage_counts
{{4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_2), std::move(gc_2));
/*
     *              T
     *             / \
     * graph  A - C - C - G
     *             \ /
     *              A
*/
std::string ans_3 = "GCCA";
BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes
{0, 1, 4, 5, 2, 3}, //sorted_graph
{{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments
{{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges
{3, 1, 3, 3, 1, 1}, //node_coverage_counts
{{7, 6, 5}, {4}, {3}, {}, {2}, {1}});
test_cases.emplace_back(std::move(ans_3), std::move(gc_3));
/*
     * graph  A - T - T - G - A
     *         \_____________/
*/
std::string ans_4 = "AGTTA";
BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges
{2, 1, 1, 1, 2}, //node_coverage_counts
{{5, 4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_4), std::move(gc_4));
/*
     *            T - G
     *           / \
     * graph  A - C - A - T - A
     *             \ /
     *              T
*/
std::string ans_5 = "ATTCA";
BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes
{0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph
{{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments
{{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges
{3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts
{{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}});
test_cases.emplace_back(std::move(ans_5), std::move(gc_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel that tests the generateConsensus device function.
std::string testGenerateConsensus(const BasicGenerateConsensus& obj)
{
//declare device buffer
uint8_t* nodes = nullptr;
int16_t* node_count = nullptr;
int16_t* graph = nullptr;
int16_t* node_id_to_pos = nullptr;
int16_t* incoming_edges = nullptr;
uint16_t* incoming_edge_count = nullptr;
int16_t* outgoing_edges = nullptr;
uint16_t* outgoing_edge_count = nullptr;
uint16_t* incoming_edge_w = nullptr;
uint16_t* node_coverage_counts = nullptr;
int16_t* node_alignments = nullptr;
uint16_t* node_alignment_count = nullptr;
//buffers that don't need initialization
int16_t* predecessors = nullptr;
int32_t* scores = nullptr;
uint8_t* consensus = nullptr;
uint16_t* coverage = nullptr;
//default data size limits
BatchConfig batch_size;
//allocate unified memory so they can be accessed by both host and device.
GW_CU_CHECK_ERR(hipMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_count, sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&predecessors, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&scores, batch_size.max_nodes_per_graph * sizeof(int32_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&consensus, batch_size.max_consensus_size * sizeof(uint8_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&coverage, batch_size.max_consensus_size * sizeof(uint16_t)));
//initialize all 'count' buffers
memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(node_alignment_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
//calculate edge counts on host
obj.get_graph_buffers(nodes, node_count,
graph, node_id_to_pos,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, node_coverage_counts,
node_alignments, node_alignment_count);
    // call the host wrapper of the generateConsensus kernel
generateConsensusTestHost(nodes,
*node_count,
graph,
node_id_to_pos,
incoming_edges,
incoming_edge_count,
outgoing_edges,
outgoing_edge_count,
incoming_edge_w,
predecessors,
scores,
consensus,
coverage,
node_coverage_counts,
node_alignments,
node_alignment_count,
batch_size.max_consensus_size);
GW_CU_CHECK_ERR(hipDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
    //construct the consensus string from the consensus buffer and return it
std::string res((char*)consensus);
GW_CU_CHECK_ERR(hipFree(nodes));
GW_CU_CHECK_ERR(hipFree(node_count));
GW_CU_CHECK_ERR(hipFree(graph));
GW_CU_CHECK_ERR(hipFree(node_id_to_pos));
GW_CU_CHECK_ERR(hipFree(incoming_edges));
GW_CU_CHECK_ERR(hipFree(incoming_edge_count));
GW_CU_CHECK_ERR(hipFree(outgoing_edges));
GW_CU_CHECK_ERR(hipFree(outgoing_edge_count));
GW_CU_CHECK_ERR(hipFree(incoming_edge_w));
GW_CU_CHECK_ERR(hipFree(node_coverage_counts));
GW_CU_CHECK_ERR(hipFree(node_alignments));
GW_CU_CHECK_ERR(hipFree(node_alignment_count));
GW_CU_CHECK_ERR(hipFree(predecessors));
GW_CU_CHECK_ERR(hipFree(scores));
GW_CU_CHECK_ERR(hipFree(consensus));
GW_CU_CHECK_ERR(hipFree(coverage));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair>
{
public:
void SetUp() {}
std::string runGenerateConsensus(const BasicGenerateConsensus& obj)
{
return testGenerateConsensus(obj);
}
};
TEST_P(GenerateConsensusTest, TestGenerateConsensuesCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
| 2a108ac32587963c6dcba5248e38d61d2512ef1b.cu | /*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/cudapoa_generate_consensus.cuh" //generateConsensusHost, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "sorted_graph.hpp" //SortedGraph
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicGenerateConsensus
{
public:
BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<int16_t> sorted_graph, Int16Vec2D node_alignments,
Int16Vec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w)
: graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges)
, outgoing_edge_w_(outgoing_edge_w)
, outgoing_edges_(outgoing_edges)
{
}
void get_graph_buffers(uint8_t* nodes, int16_t* node_count,
int16_t* sorted_poa, int16_t* node_id_to_pos,
int16_t* incoming_edges, uint16_t* incoming_edge_count,
int16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* node_coverage_counts,
int16_t* node_alignments, uint16_t* node_alignment_count) const
{
graph_.get_nodes(nodes, node_count);
graph_.get_sorted_graph(sorted_poa);
graph_.get_node_id_to_pos(node_id_to_pos);
graph_.get_node_coverage_counts(node_coverage_counts);
graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph_.get_node_alignments(node_alignments, node_alignment_count);
get_incoming_edge_w(incoming_edge_w);
}
void get_incoming_edge_w(uint16_t* incoming_edge_w) const
{
auto outgoing_edges = graph_.get_outgoing_edges();
for (int i = 0; i < get_size(outgoing_edges); i++)
{
for (int j = 0; j < get_size(outgoing_edges[i]); j++)
{
int16_t to_node = outgoing_edges[i][j];
incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j];
}
}
}
protected:
SortedGraph graph_;
Int16Vec2D outgoing_edges_;
Uint16Vec2D outgoing_edge_w_;
};
typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair;
// create a vector of test cases
std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases()
{
std::vector<GenerateConsensusTestPair> test_cases;
/*
* T
* / \
* graph A — A A
* \ /
* A
*/
std::string ans_1 = "ATAA";
BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes
{0, 1, 2, 4, 3}, //sorted_graph
{{}, {}, {4}, {}, {2}}, //node_alignments
{{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges
{2, 2, 1, 2, 1}, //node_coverage_counts
{{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w
test_cases.emplace_back(std::move(ans_1), std::move(gc_1));
/*
* graph A — T — C — G — A
*/
std::string ans_2 = "AGCTA";
BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1}, {2}, {3}, {4}, {}}, //outgoing_edges
{1, 1, 1, 1, 1}, //node_coverage_counts
{{4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_2), std::move(gc_2));
/*
* T
* / \
* graph A — C — C — G
* \ /
* A
*/
std::string ans_3 = "GCCA";
BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes
{0, 1, 4, 5, 2, 3}, //sorted_graph
{{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments
{{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges
{3, 1, 3, 3, 1, 1}, //node_coverage_counts
{{7, 6, 5}, {4}, {3}, {}, {2}, {1}});
test_cases.emplace_back(std::move(ans_3), std::move(gc_3));
/*
* graph A — T — T — G — A
* \_____________/
*/
std::string ans_4 = "AGTTA";
BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges
{2, 1, 1, 1, 2}, //node_coverage_counts
{{5, 4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_4), std::move(gc_4));
/*
* T — G
* / \
* graph A — C — A — T — A
* \ /
* T
*/
std::string ans_5 = "ATTCA";
BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes
{0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph
{{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments
{{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges
{3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts
{{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}});
test_cases.emplace_back(std::move(ans_5), std::move(gc_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel that tests the generateConsensus device function.
std::string testGenerateConsensus(const BasicGenerateConsensus& obj)
{
//declare device buffer
uint8_t* nodes = nullptr;
int16_t* node_count = nullptr;
int16_t* graph = nullptr;
int16_t* node_id_to_pos = nullptr;
int16_t* incoming_edges = nullptr;
uint16_t* incoming_edge_count = nullptr;
int16_t* outgoing_edges = nullptr;
uint16_t* outgoing_edge_count = nullptr;
uint16_t* incoming_edge_w = nullptr;
uint16_t* node_coverage_counts = nullptr;
int16_t* node_alignments = nullptr;
uint16_t* node_alignment_count = nullptr;
//buffers that don't need initialization
int16_t* predecessors = nullptr;
int32_t* scores = nullptr;
uint8_t* consensus = nullptr;
uint16_t* coverage = nullptr;
//default data size limits
BatchConfig batch_size;
//allocate unified memory so they can be accessed by both host and device.
GW_CU_CHECK_ERR(cudaMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_count, sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&predecessors, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&scores, batch_size.max_nodes_per_graph * sizeof(int32_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&consensus, batch_size.max_consensus_size * sizeof(uint8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&coverage, batch_size.max_consensus_size * sizeof(uint16_t)));
//initialize all 'count' buffers
memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(node_alignment_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
//calculate edge counts on host
obj.get_graph_buffers(nodes, node_count,
graph, node_id_to_pos,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, node_coverage_counts,
node_alignments, node_alignment_count);
    // call the host wrapper of the generateConsensus kernel
generateConsensusTestHost(nodes,
*node_count,
graph,
node_id_to_pos,
incoming_edges,
incoming_edge_count,
outgoing_edges,
outgoing_edge_count,
incoming_edge_w,
predecessors,
scores,
consensus,
coverage,
node_coverage_counts,
node_alignments,
node_alignment_count,
batch_size.max_consensus_size);
GW_CU_CHECK_ERR(cudaDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
    //construct the consensus string from the consensus buffer and return it
std::string res((char*)consensus);
GW_CU_CHECK_ERR(cudaFree(nodes));
GW_CU_CHECK_ERR(cudaFree(node_count));
GW_CU_CHECK_ERR(cudaFree(graph));
GW_CU_CHECK_ERR(cudaFree(node_id_to_pos));
GW_CU_CHECK_ERR(cudaFree(incoming_edges));
GW_CU_CHECK_ERR(cudaFree(incoming_edge_count));
GW_CU_CHECK_ERR(cudaFree(outgoing_edges));
GW_CU_CHECK_ERR(cudaFree(outgoing_edge_count));
GW_CU_CHECK_ERR(cudaFree(incoming_edge_w));
GW_CU_CHECK_ERR(cudaFree(node_coverage_counts));
GW_CU_CHECK_ERR(cudaFree(node_alignments));
GW_CU_CHECK_ERR(cudaFree(node_alignment_count));
GW_CU_CHECK_ERR(cudaFree(predecessors));
GW_CU_CHECK_ERR(cudaFree(scores));
GW_CU_CHECK_ERR(cudaFree(consensus));
GW_CU_CHECK_ERR(cudaFree(coverage));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair>
{
public:
void SetUp() {}
std::string runGenerateConsensus(const BasicGenerateConsensus& obj)
{
return testGenerateConsensus(obj);
}
};
TEST_P(GenerateConsensusTest, TestGenerateConsensuesCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
|
d5db4a097637673460d89f391804ef2afa7fbabd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <template <class Particle> class Cell >
__global__ void GPU_eme(
Plasma<Cell> *gp,
Cell<Particle> **cells,
int i_s,int l_s,int k_s,
double *E,double *H1, double *H2,
double *J,double c1,double c2, double tau,
int dx1,int dy1,int dz1,int dx2,int dy2,int dz2
)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
Cell<Particle> *c0 = cells[0];
gp->emeElement(*c0,i_s+nx,l_s+ny,k_s+nz,E,H1,H2,
J,c1,c2,tau,
dx1,dy1,dz1,dx2,dy2,dz2);
}
| d5db4a097637673460d89f391804ef2afa7fbabd.cu | template <template <class Particle> class Cell >
__global__ void GPU_eme(
Plasma<Cell> *gp,
Cell<Particle> **cells,
int i_s,int l_s,int k_s,
double *E,double *H1, double *H2,
double *J,double c1,double c2, double tau,
int dx1,int dy1,int dz1,int dx2,int dy2,int dz2
)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
Cell<Particle> *c0 = cells[0];
gp->emeElement(*c0,i_s+nx,l_s+ny,k_s+nz,E,H1,H2,
J,c1,c2,tau,
dx1,dy1,dz1,dx2,dy2,dz2);
}
|
a9c61699bc75e607cd29131b15a2fab8ff548740.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/loss_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
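// Each position contributes loss = -log(prob[label]) (clamped at FLT_MIN to
// avoid -inf); positions carrying the ignore label contribute 0 and are
// excluded from the normalizer via the per-position `counts` array.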
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
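// The gradient of softmax + negative log-likelihood w.r.t. the logits is
// (prob - one_hot(label)): Backward_gpu first copies prob into bottom_diff,
// then this kernel subtracts 1 at each label index (or zeroes the whole
// channel slice for ignored labels) before the final loss-weight scaling.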
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| a9c61699bc75e607cd29131b15a2fab8ff548740.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/loss_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
5901502bb9ebd18432eeabddd83287264a78e070.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <hipcub/hipcub.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/replace.hpp>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <cudf/types.hpp>
#include <utilities/error_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cudf_utils.h>
#include <utilities/cuda_utils.hpp>
#include <utilities/column_utils.hpp>
#include <bitmask/legacy/legacy_bitmask.hpp>
#include <bitmask/legacy/bit_mask.cuh>
using bit_mask::bit_mask_t;
namespace{ //anonymous
static constexpr int warp_size = 32;
static constexpr int BLOCK_SIZE = 256;
// returns the block_sum using the given shared array of warp sums.
template <typename T>
__device__ T sum_warps(T* warp_smem)
{
T block_sum = 0;
if (threadIdx.x < warp_size) {
T my_warp_sum = warp_smem[threadIdx.x];
__shared__ typename hipcub::WarpReduce<T>::TempStorage temp_storage;
block_sum = hipcub::WarpReduce<T>(temp_storage).Sum(my_warp_sum);
}
return block_sum;
}
// return the new_value for output column at index `idx`
template<class T, bool replacement_has_nulls>
__device__ auto get_new_value(cudf::size_type idx,
const T* __restrict__ input_data,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
auto found_ptr = thrust::find(thrust::seq, values_to_replace_begin,
values_to_replace_end, input_data[idx]);
T new_value{0};
bool output_is_valid{true};
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) {
output_is_valid = bit_mask::is_valid(replacement_valid, d);
}
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `output_data` given the following
* rule: replace all `values_to_replace[i]` in [values_to_replace_begin`,
* `values_to_replace_end`) present in `output_data` with `d_replacement_values[i]`.
*
* @tparam input_has_nulls `true` if output column has valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if replacement_values column has valid mask, `false` otherwise
* The input_has_nulls and replacement_has_nulls template parameters allows us to specialize
* this kernel for the different scenario for performance without writing different kernel.
*
* @param[in] input_data Device array with the data to be modified
* @param[in] input_valid Valid mask associated with input_data
* @param[out] output_data Device array to store the data from input_data
* @param[out] output_valid Valid mask associated with output_data
* @param[out] output_valid_count #valid in output column
* @param[in] nrows # rows in `output_data`
* @param[in] values_to_replace_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @param[in] values_to_replace_end Device pointer to the end of the sequence
* of old values to be replaced
* @param[in] d_replacement_values Device array with the new values
* @param[in] replacement_valid Valid mask associated with d_replacement_values
*
* @returns
*/
/* ----------------------------------------------------------------------------*/
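// Example: with input_data = {10, 20, 30, 20}, values_to_replace = {20, 30}
// and d_replacement_values = {200, 300}, the kernel writes
// output_data = {10, 200, 300, 200}. If replacement_has_nulls and bit 1 of
// replacement_valid is cleared, the rows that matched 30 are marked null
// instead of valid.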
template <class T,
bool input_has_nulls, bool replacement_has_nulls>
__global__
void replace_kernel(const T* __restrict__ input_data,
bit_mask_t const * __restrict__ input_valid,
T * __restrict__ output_data,
bit_mask_t * __restrict__ output_valid,
cudf::size_type * __restrict__ output_valid_count,
cudf::size_type nrows,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
__shared__ uint32_t valid_sum[warp_size];
// init shared memory for block valid counts
if (input_has_nulls or replacement_has_nulls){
if(threadIdx.x < warp_size) valid_sum[threadIdx.x] = 0;
__syncthreads();
}
while (i < nrows) {
bool output_is_valid = true;
uint32_t bitmask = 0xffffffff;
if (input_has_nulls) {
bool const input_is_valid{bit_mask::is_valid(input_valid, i)};
output_is_valid = input_is_valid;
bitmask = __ballot_sync(active_mask, input_is_valid);
if (input_is_valid) {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
} else {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls){
bitmask &= __ballot_sync(active_mask, output_is_valid);
if(0 == (threadIdx.x % warp_size)){
output_valid[(int)(i/warp_size)] = bitmask;
valid_sum[(int)(threadIdx.x / warp_size)] += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if(input_has_nulls or replacement_has_nulls){
__syncthreads(); // waiting for the valid counts of each warp to be ready
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = sum_warps<uint32_t>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x < warp_size && 0 == (threadIdx.x % warp_size)) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_kernel_forwarder {
template <typename col_type>
void operator()(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
gdf_column &output,
hipStream_t stream = 0)
{
const bool input_has_nulls = cudf::has_nulls(input_col);
const bool replacement_has_nulls = cudf::has_nulls(replacement_values);
const bit_mask_t* __restrict__ typed_input_valid =
reinterpret_cast<bit_mask_t*>(input_col.valid);
const bit_mask_t* __restrict__ typed_replacement_valid =
reinterpret_cast<bit_mask_t*>(replacement_values.valid);
bit_mask_t* __restrict__ typed_out_valid =
reinterpret_cast<bit_mask_t*>(output.valid);
cudf::size_type *valid_count = nullptr;
if (typed_out_valid != nullptr) {
RMM_ALLOC(&valid_count, sizeof(cudf::size_type), stream);
CUDA_TRY(hipMemsetAsync(valid_count, 0, sizeof(cudf::size_type), stream));
}
col_type const * values_to_replace_ptr{ cudf::get_data<col_type const>(values_to_replace) };
cudf::util::cuda::grid_config_1d grid{output.size, BLOCK_SIZE, 1};
auto replace = replace_kernel<col_type, true, true>;
if (input_has_nulls){
if (replacement_has_nulls){
replace = replace_kernel<col_type, true, true>;
}else{
replace = replace_kernel<col_type, true, false>;
}
}else{
if (replacement_has_nulls){
replace = replace_kernel<col_type, false, true>;
}else{
replace = replace_kernel<col_type, false, false>;
}
}
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream,
static_cast<const col_type*>(input_col.data),
typed_input_valid,
static_cast<col_type*>(output.data),
typed_out_valid,
valid_count,
output.size,
values_to_replace_ptr,
values_to_replace_ptr + replacement_values.size,
static_cast<const col_type*>(replacement_values.data),
typed_replacement_valid);
if(typed_out_valid != nullptr){
cudf::size_type valids {0};
CUDA_TRY(hipMemcpyAsync(&valids, valid_count,
sizeof(cudf::size_type), hipMemcpyDefault, stream));
output.null_count = output.size - valids;
RMM_FREE(valid_count, stream);
}
}
};
} //end anonymous namespace
namespace cudf{
namespace detail {
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
hipStream_t stream = 0) {
if (0 == input_col.size )
{
return cudf::empty_like(input_col);
}
if (0 == values_to_replace.size || 0 == replacement_values.size)
{
return cudf::copy(input_col, stream);
}
CUDF_EXPECTS(values_to_replace.size == replacement_values.size,
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(input_col.dtype == values_to_replace.dtype &&
input_col.dtype == replacement_values.dtype,
"Columns type mismatch.");
CUDF_EXPECTS(input_col.data != nullptr, "Null input data.");
CUDF_EXPECTS(values_to_replace.data != nullptr && replacement_values.data != nullptr,
"Null replace data.");
CUDF_EXPECTS(values_to_replace.valid == nullptr || values_to_replace.null_count == 0,
"Nulls are in values_to_replace column.");
gdf_column output = cudf::allocate_like(input_col, RETAIN, stream);
if (nullptr == input_col.valid && replacement_values.valid != nullptr) {
cudf::valid_type *valid = nullptr;
cudf::size_type bytes = gdf_valid_allocation_size(input_col.size);
RMM_ALLOC(&valid, bytes, stream);
CUDA_TRY(hipMemsetAsync(valid, 0, bytes, stream));
CUDF_EXPECTS(GDF_SUCCESS == gdf_column_view(&output, output.data, valid,
input_col.size, input_col.dtype),
"cudf::replace failed to add valid mask to output col.");
}
cudf::type_dispatcher(input_col.dtype, replace_kernel_forwarder{},
input_col,
values_to_replace,
replacement_values,
output,
stream);
CHECK_STREAM(stream);
return output;
}
} //end details
/* --------------------------------------------------------------------------*/
/**
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
 * @param[in] input_col gdf_column with the data to be modified
* @param[in] values_to_replace gdf_column with the old values to be replaced
* @param[in] replacement_values gdf_column with the new values
*
* @returns output gdf_column with the modified data
*/
/* ----------------------------------------------------------------------------*/
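// Minimal usage sketch (assumes `input`, `old_vals` and `new_vals` are
// already-populated gdf_columns of the same dtype):
//
//   gdf_column out = cudf::find_and_replace_all(input, old_vals, new_vals);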
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values){
return detail::find_and_replace_all(input_col, values_to_replace, replacement_values);
}
} //end cudf
namespace{ //anonymous
using bit_mask::bit_mask_t;
template <typename Type>
__global__
void replace_nulls_with_scalar(cudf::size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : *replacement;
}
}
template <typename Type>
__global__
void replace_nulls_with_column(cudf::size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : replacement[i];
}
}
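// Both kernels above implement out_data[i] = is_valid(in_valid, i) ? in_data[i]
// : replacement, walking the column with a grid-stride loop; they differ only
// in whether the replacement is a single broadcast scalar or a per-row column.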
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
 *        `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type>
void operator()(cudf::size_type nrows,
void* d_in_data,
cudf::valid_type* d_in_valid,
const void* d_replacement,
void* d_out_data,
hipStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
hipLaunchKernelGGL(( replace_nulls_with_column), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
}
};
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
 *        `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type>
void operator()(cudf::size_type nrows,
void* d_in_data,
cudf::valid_type* d_in_valid,
const void* replacement,
void* d_out_data,
hipStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
auto t_replacement = static_cast<const col_type*>(replacement);
col_type* d_replacement = nullptr;
RMM_TRY(RMM_ALLOC(&d_replacement, sizeof(col_type), stream));
CUDA_TRY(hipMemcpyAsync(d_replacement, t_replacement, sizeof(col_type),
hipMemcpyHostToDevice, stream));
hipLaunchKernelGGL(( replace_nulls_with_scalar), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
RMM_TRY(RMM_FREE(d_replacement, stream));
}
};
} //end anonymous namespace
namespace cudf {
namespace detail {
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement,
hipStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(replacement.size == 1 || replacement.size == input.size, "Column size mismatch");
CUDF_EXPECTS(nullptr != replacement.data, "Null replacement data");
CUDF_EXPECTS(nullptr == replacement.valid || 0 == replacement.null_count,
"Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_column_kernel_forwarder{},
input.size,
input.data,
input.valid,
replacement.data,
output.data,
stream);
return output;
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement,
hipStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(true == replacement.is_valid, "Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_scalar_kernel_forwarder{},
input.size,
input.data,
input.valid,
&(replacement.data),
output.data);
return output;
}
} // namespace detail
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
} // namespace cudf
| 5901502bb9ebd18432eeabddd83287264a78e070.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <cub/cub.cuh>
#include <cudf/legacy/copying.hpp>
#include <cudf/replace.hpp>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <cudf/types.hpp>
#include <utilities/error_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cudf_utils.h>
#include <utilities/cuda_utils.hpp>
#include <utilities/column_utils.hpp>
#include <bitmask/legacy/legacy_bitmask.hpp>
#include <bitmask/legacy/bit_mask.cuh>
using bit_mask::bit_mask_t;
namespace{ //anonymous
static constexpr int warp_size = 32;
static constexpr int BLOCK_SIZE = 256;
// returns the block_sum using the given shared array of warp sums.
template <typename T>
__device__ T sum_warps(T* warp_smem)
{
T block_sum = 0;
if (threadIdx.x < warp_size) {
T my_warp_sum = warp_smem[threadIdx.x];
__shared__ typename cub::WarpReduce<T>::TempStorage temp_storage;
block_sum = cub::WarpReduce<T>(temp_storage).Sum(my_warp_sum);
}
return block_sum;
}
// return the new_value for output column at index `idx`
template<class T, bool replacement_has_nulls>
__device__ auto get_new_value(cudf::size_type idx,
const T* __restrict__ input_data,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
auto found_ptr = thrust::find(thrust::seq, values_to_replace_begin,
values_to_replace_end, input_data[idx]);
T new_value{0};
bool output_is_valid{true};
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) {
output_is_valid = bit_mask::is_valid(replacement_valid, d);
}
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `output_data` given the following
* rule: replace all `values_to_replace[i]` in [values_to_replace_begin`,
* `values_to_replace_end`) present in `output_data` with `d_replacement_values[i]`.
*
* @tparam input_has_nulls `true` if output column has valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if replacement_values column has valid mask, `false` otherwise
* The input_has_nulls and replacement_has_nulls template parameters allows us to specialize
* this kernel for the different scenario for performance without writing different kernel.
*
* @param[in] input_data Device array with the data to be modified
* @param[in] input_valid Valid mask associated with input_data
* @param[out] output_data Device array to store the data from input_data
* @param[out] output_valid Valid mask associated with output_data
* @param[out] output_valid_count #valid in output column
* @param[in] nrows # rows in `output_data`
* @param[in] values_to_replace_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @param[in] values_to_replace_end Device pointer to the end of the sequence
* of old values to be replaced
* @param[in] d_replacement_values Device array with the new values
* @param[in] replacement_valid Valid mask associated with d_replacement_values
*
* @returns
*/
/* ----------------------------------------------------------------------------*/
template <class T,
bool input_has_nulls, bool replacement_has_nulls>
__global__
void replace_kernel(const T* __restrict__ input_data,
bit_mask_t const * __restrict__ input_valid,
T * __restrict__ output_data,
bit_mask_t * __restrict__ output_valid,
cudf::size_type * __restrict__ output_valid_count,
cudf::size_type nrows,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
__shared__ uint32_t valid_sum[warp_size];
// init shared memory for block valid counts
if (input_has_nulls or replacement_has_nulls){
if(threadIdx.x < warp_size) valid_sum[threadIdx.x] = 0;
__syncthreads();
}
while (i < nrows) {
bool output_is_valid = true;
uint32_t bitmask = 0xffffffff;
if (input_has_nulls) {
bool const input_is_valid{bit_mask::is_valid(input_valid, i)};
output_is_valid = input_is_valid;
bitmask = __ballot_sync(active_mask, input_is_valid);
if (input_is_valid) {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
} else {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls){
bitmask &= __ballot_sync(active_mask, output_is_valid);
if(0 == (threadIdx.x % warp_size)){
output_valid[(int)(i/warp_size)] = bitmask;
valid_sum[(int)(threadIdx.x / warp_size)] += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if(input_has_nulls or replacement_has_nulls){
__syncthreads(); // waiting for the valid counts of each warp to be ready
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = sum_warps<uint32_t>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x < warp_size && 0 == (threadIdx.x % warp_size)) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
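// Descriptive note on the valid-count bookkeeping above: on each grid-stride iteration the
// per-row output validity is balloted into a 32-bit word with __ballot_sync; the warp leader
// writes that word into output_valid and accumulates its __popc into shared valid_sum[warp_id].
// After the loop, sum_warps() reduces the per-warp sums with cub::WarpReduce and thread 0
// atomically adds the block total to output_valid_count.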
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_kernel_forwarder {
template <typename col_type>
void operator()(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
gdf_column &output,
cudaStream_t stream = 0)
{
const bool input_has_nulls = cudf::has_nulls(input_col);
const bool replacement_has_nulls = cudf::has_nulls(replacement_values);
const bit_mask_t* __restrict__ typed_input_valid =
reinterpret_cast<bit_mask_t*>(input_col.valid);
const bit_mask_t* __restrict__ typed_replacement_valid =
reinterpret_cast<bit_mask_t*>(replacement_values.valid);
bit_mask_t* __restrict__ typed_out_valid =
reinterpret_cast<bit_mask_t*>(output.valid);
cudf::size_type *valid_count = nullptr;
if (typed_out_valid != nullptr) {
RMM_ALLOC(&valid_count, sizeof(cudf::size_type), stream);
CUDA_TRY(cudaMemsetAsync(valid_count, 0, sizeof(cudf::size_type), stream));
}
col_type const * values_to_replace_ptr{ cudf::get_data<col_type const>(values_to_replace) };
cudf::util::cuda::grid_config_1d grid{output.size, BLOCK_SIZE, 1};
auto replace = replace_kernel<col_type, true, true>;
if (input_has_nulls){
if (replacement_has_nulls){
replace = replace_kernel<col_type, true, true>;
}else{
replace = replace_kernel<col_type, true, false>;
}
}else{
if (replacement_has_nulls){
replace = replace_kernel<col_type, false, true>;
}else{
replace = replace_kernel<col_type, false, false>;
}
}
replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(
static_cast<const col_type*>(input_col.data),
typed_input_valid,
static_cast<col_type*>(output.data),
typed_out_valid,
valid_count,
output.size,
values_to_replace_ptr,
values_to_replace_ptr + replacement_values.size,
static_cast<const col_type*>(replacement_values.data),
typed_replacement_valid);
if(typed_out_valid != nullptr){
cudf::size_type valids {0};
CUDA_TRY(cudaMemcpyAsync(&valids, valid_count,
sizeof(cudf::size_type), cudaMemcpyDefault, stream));
output.null_count = output.size - valids;
RMM_FREE(valid_count, stream);
}
}
};
} //end anonymous namespace
namespace cudf{
namespace detail {
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
cudaStream_t stream = 0) {
if (0 == input_col.size )
{
return cudf::empty_like(input_col);
}
if (0 == values_to_replace.size || 0 == replacement_values.size)
{
return cudf::copy(input_col, stream);
}
CUDF_EXPECTS(values_to_replace.size == replacement_values.size,
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(input_col.dtype == values_to_replace.dtype &&
input_col.dtype == replacement_values.dtype,
"Columns type mismatch.");
CUDF_EXPECTS(input_col.data != nullptr, "Null input data.");
CUDF_EXPECTS(values_to_replace.data != nullptr && replacement_values.data != nullptr,
"Null replace data.");
CUDF_EXPECTS(values_to_replace.valid == nullptr || values_to_replace.null_count == 0,
"Nulls are in values_to_replace column.");
gdf_column output = cudf::allocate_like(input_col, RETAIN, stream);
if (nullptr == input_col.valid && replacement_values.valid != nullptr) {
cudf::valid_type *valid = nullptr;
cudf::size_type bytes = gdf_valid_allocation_size(input_col.size);
RMM_ALLOC(&valid, bytes, stream);
CUDA_TRY(cudaMemsetAsync(valid, 0, bytes, stream));
CUDF_EXPECTS(GDF_SUCCESS == gdf_column_view(&output, output.data, valid,
input_col.size, input_col.dtype),
"cudf::replace failed to add valid mask to output col.");
}
cudf::type_dispatcher(input_col.dtype, replace_kernel_forwarder{},
input_col,
values_to_replace,
replacement_values,
output,
stream);
CHECK_STREAM(stream);
return output;
}
} //end details
/* --------------------------------------------------------------------------*/
/**
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
 * @param[in] input_col gdf_column with the data to be modified
* @param[in] values_to_replace gdf_column with the old values to be replaced
* @param[in] replacement_values gdf_column with the new values
*
* @returns output gdf_column with the modified data
*/
/* ----------------------------------------------------------------------------*/
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values){
return detail::find_and_replace_all(input_col, values_to_replace, replacement_values);
}
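// Hedged usage sketch for the public entry point above (illustrative only; construction and
// ownership of the legacy gdf_column objects are elided):
//
// gdf_column col = ...; // e.g. INT32 data {4, 7, 4, 9}
// gdf_column old_vals = ...; // {4, 9}
// gdf_column new_vals = ...; // {1, 2}
// gdf_column out = cudf::find_and_replace_all(col, old_vals, new_vals); // data -> {1, 7, 1, 2}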
} //end cudf
namespace{ //anonymous
using bit_mask::bit_mask_t;
template <typename Type>
__global__
void replace_nulls_with_scalar(cudf::size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : *replacement;
}
}
template <typename Type>
__global__
void replace_nulls_with_column(cudf::size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : replacement[i];
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
 * `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type>
void operator()(cudf::size_type nrows,
void* d_in_data,
cudf::valid_type* d_in_valid,
const void* d_replacement,
void* d_out_data,
cudaStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
replace_nulls_with_column<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
}
};
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
 * `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type>
void operator()(cudf::size_type nrows,
void* d_in_data,
cudf::valid_type* d_in_valid,
const void* replacement,
void* d_out_data,
cudaStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
auto t_replacement = static_cast<const col_type*>(replacement);
col_type* d_replacement = nullptr;
RMM_TRY(RMM_ALLOC(&d_replacement, sizeof(col_type), stream));
CUDA_TRY(cudaMemcpyAsync(d_replacement, t_replacement, sizeof(col_type),
cudaMemcpyHostToDevice, stream));
replace_nulls_with_scalar<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
RMM_TRY(RMM_FREE(d_replacement, stream));
}
};
} //end anonymous namespace
namespace cudf {
namespace detail {
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement,
cudaStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(replacement.size == 1 || replacement.size == input.size, "Column size mismatch");
CUDF_EXPECTS(nullptr != replacement.data, "Null replacement data");
CUDF_EXPECTS(nullptr == replacement.valid || 0 == replacement.null_count,
"Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_column_kernel_forwarder{},
input.size,
input.data,
input.valid,
replacement.data,
output.data,
stream);
return output;
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement,
cudaStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(true == replacement.is_valid, "Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_scalar_kernel_forwarder{},
input.size,
input.data,
input.valid,
&(replacement.data),
output.data,
stream);
return output;
}
} // namespace detail
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
} // namespace cudf
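// Hedged usage sketch for the replace_nulls entry points above (illustrative only; column and
// scalar construction are elided):
//
// gdf_column with_nulls = ...; // {1, null, 3, null}
// gdf_column fill_col = ...; // {9, 8, 7, 6}, no nulls
// gdf_column a = cudf::replace_nulls(with_nulls, fill_col); // {1, 8, 3, 6}
//
// gdf_scalar zero = ...; // valid scalar of the same dtype
// gdf_column b = cudf::replace_nulls(with_nulls, zero); // {1, 0, 3, 0}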
|
ee0eeca85843f27da6ae7de1e80961beef67ddf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <helper_timer.h>
#include "cu_utils.h"
#include "levdist.h"
#include "cu_levdist.h"
#include "textfromfile.h"
#include "debug_table.h"
#define MEGA_B 1048576UL
#define KILO_B 1024UL
#define STR_MAXLENGTH (64 * KILO_B)
int getargs(const int argc, const char * argv[], char * text, char * patt, long * n, long *m) {
if ( argc != 3 )
return EXIT_FAILURE;
text[STR_MAXLENGTH - 1] = 0;
patt[STR_MAXLENGTH - 1] = 0;
if ( textfromfile(argv[1], STR_MAXLENGTH, text) != 0
|| textfromfile(argv[2], STR_MAXLENGTH, patt) != 0 ) {
return EXIT_FAILURE;
}
*n = (text[STR_MAXLENGTH-1] == 0? strlen(text) : STR_MAXLENGTH);
*m = (patt[STR_MAXLENGTH-1] == 0? strlen(patt) : STR_MAXLENGTH);
if ( *n < *m ) {
char * tmp = text;
text = patt;
patt = tmp;
long t = *n;
*n = *m;
*m = t;
}
if ( *n < 1000 && *m < 1000 )
fprintf(stdout, "Input: %s \n(%lu), \n%s \n(%lu)\n\n", text, *n, patt, *m);
else
fprintf(stdout, "Input: (%lu), (%lu)\n\n", *n, *m);
fflush(stdout);
return 0;
}
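// Illustrative invocation (binary name is hypothetical): ./cu_levdist text.txt pattern.txt
// getargs() loads both files (up to STR_MAXLENGTH bytes each) into the preallocated buffers
// and returns their lengths through n and m, with n holding the larger of the two.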
int main(int argc, const char * argv[]) {
char * text, *patt;
long * table;
long m, n;
long d;
hipSetDevice(0);
hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, 2);
text = (char*)malloc(sizeof(char)*STR_MAXLENGTH);
patt = (char*)malloc(sizeof(char)*STR_MAXLENGTH);
if (text == NULL || patt == NULL) {
fprintf(stderr, "malloc error.\n");
fflush(stderr);
goto exit_error;
}
if (getargs(argc, argv, text, patt, &n, &m) != 0)
goto exit_error;
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
#ifdef USE_PURE_DP
sdkResetTimer(&timer);
sdkStartTimer(&timer);
table = (long*)malloc(sizeof(long)*m*n);
d = dp_edist(table, text, n, patt, m);
#ifndef DEBUG_TABLE
free(table);
#endif
sdkStopTimer(&timer);
printf("\nElapsed %f msec.\n", sdkGetTimerValue(&timer));
printf("Edit distance (by Pure DP): %lu\n", d);
#ifdef DEBUG_TABLE
if (max(n, m) < 128)
show_table(table, n, m);
debug_table = (long*)malloc(sizeof(long)*m*n);
#endif
#endif // USE_PURE_DP
sdkResetTimer(&timer);
sdkStartTimer(&timer);
long * frame = (long*)malloc(sizeof(long)*(m + n + 1));
weaving_setframe(frame, n, m);
d = weaving_edist(frame, text, n, patt, m);
sdkStopTimer(&timer);
printf("\nElapsed %f msec.\n", sdkGetTimerValue(&timer));
printf("Edit distance (by single thread weaving DP): %lu\n", d);
fprintf(stdout, "\nNow computing edit distance by cu cdp Weaving DP.\n");
fflush(stdout);
weaving_setframe(frame, n, m);
printf("frame input: \n");
for (int i = 0; i < min(n + m + 1, 64); i++) {
printf("%d, ", frame[i]);
}
printf("\n");
fflush(stdout);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
d = cu_levdist(frame, text, n, patt, m);
sdkStopTimer(&timer);
printf("\nElapsed %f msec.\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
printf("Edit distance (by CDP Weaving DP): %lu\n\n", d);
printf("frame output: \n");
for (int i = 0; i < min(n + m + 1, 64); i++) {
printf("%d, ", frame[i]);
}
printf("\n");
fflush(stdout);
free(frame);
#ifdef DEBUG_TABLE
if ( max(n,m) < 128 )
show_table(debug_table, n, m);
if ( compare_table(debug_table, table, n, m) != 0) {
printf("table compare failed.\n");
} else {
printf("two tables are identical.\n");
}
free(debug_table);
free(table);
#endif
exit_error:
free(text);
free(patt);
return 0;
}
| ee0eeca85843f27da6ae7de1e80961beef67ddf3.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <helper_timer.h>
#include "cu_utils.h"
#include "levdist.h"
#include "cu_levdist.h"
#include "textfromfile.h"
#include "debug_table.h"
#define MEGA_B 1048576UL
#define KILO_B 1024UL
#define STR_MAXLENGTH (64 * KILO_B)
int getargs(const int argc, const char * argv[], char * text, char * patt, long * n, long *m) {
if ( argc != 3 )
return EXIT_FAILURE;
text[STR_MAXLENGTH - 1] = 0;
patt[STR_MAXLENGTH - 1] = 0;
if ( textfromfile(argv[1], STR_MAXLENGTH, text) != 0
|| textfromfile(argv[2], STR_MAXLENGTH, patt) != 0 ) {
return EXIT_FAILURE;
}
*n = (text[STR_MAXLENGTH-1] == 0? strlen(text) : STR_MAXLENGTH);
*m = (patt[STR_MAXLENGTH-1] == 0? strlen(patt) : STR_MAXLENGTH);
if ( *n < *m ) {
char * tmp = text;
text = patt;
patt = tmp;
long t = *n;
*n = *m;
*m = t;
}
if ( *n < 1000 && *m < 1000 )
fprintf(stdout, "Input: %s \n(%lu), \n%s \n(%lu)\n\n", text, *n, patt, *m);
else
fprintf(stdout, "Input: (%lu), (%lu)\n\n", *n, *m);
fflush(stdout);
return 0;
}
int main(int argc, const char * argv[]) {
char * text, *patt;
long * table;
long m, n;
long d;
cudaSetDevice(0);
cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, 2);
text = (char*)malloc(sizeof(char)*STR_MAXLENGTH);
patt = (char*)malloc(sizeof(char)*STR_MAXLENGTH);
if (text == NULL || patt == NULL) {
fprintf(stderr, "malloc error.\n");
fflush(stderr);
goto exit_error;
}
if (getargs(argc, argv, text, patt, &n, &m) != 0)
goto exit_error;
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
#ifdef USE_PURE_DP
sdkResetTimer(&timer);
sdkStartTimer(&timer);
table = (long*)malloc(sizeof(long)*m*n);
d = dp_edist(table, text, n, patt, m);
#ifndef DEBUG_TABLE
free(table);
#endif
sdkStopTimer(&timer);
printf("\nElapsed %f msec.\n", sdkGetTimerValue(&timer));
printf("Edit distance (by Pure DP): %lu\n", d);
#ifdef DEBUG_TABLE
if (max(n, m) < 128)
show_table(table, n, m);
debug_table = (long*)malloc(sizeof(long)*m*n);
#endif
#endif // USE_PURE_DP
sdkResetTimer(&timer);
sdkStartTimer(&timer);
long * frame = (long*)malloc(sizeof(long)*(m + n + 1));
weaving_setframe(frame, n, m);
d = weaving_edist(frame, text, n, patt, m);
sdkStopTimer(&timer);
printf("\nElapsed %f msec.\n", sdkGetTimerValue(&timer));
printf("Edit distance (by single thread weaving DP): %lu\n", d);
fprintf(stdout, "\nNow computing edit distance by cu cdp Weaving DP.\n");
fflush(stdout);
weaving_setframe(frame, n, m);
printf("frame input: \n");
for (int i = 0; i < min(n + m + 1, 64); i++) {
printf("%d, ", frame[i]);
}
printf("\n");
fflush(stdout);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
d = cu_levdist(frame, text, n, patt, m);
sdkStopTimer(&timer);
printf("\nElapsed %f msec.\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
printf("Edit distance (by CDP Weaving DP): %lu\n\n", d);
printf("frame output: \n");
for (int i = 0; i < min(n + m + 1, 64); i++) {
printf("%d, ", frame[i]);
}
printf("\n");
fflush(stdout);
free(frame);
#ifdef DEBUG_TABLE
if ( max(n,m) < 128 )
show_table(debug_table, n, m);
if ( compare_table(debug_table, table, n, m) != 0) {
printf("table compare failed.\n");
} else {
printf("two tables are identical.\n");
}
free(debug_table);
free(table);
#endif
exit_error:
free(text);
free(patt);
return 0;
}
|
280bf5bcf66b23b73ee612071bf9412d73318d69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 Equinor ASA
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#define NBLOCKS 1024
#define NTHREADS 256
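// Summary of the Guo et al. (PRE 66, 036304, 2002) grey-scale velocity correction that the
// kernels below compute (restated here for readability): with porosity eps, permeability K,
// effective kinematic viscosity mu_eff and body force g,
// F_eps = 1.75 / sqrt(150 * eps^3), c0 = 0.5*(1 + 0.5*eps*mu_eff/K), c1 = 0.5*eps*F_eps/sqrt(K),
// v = (sum_q e_q f_q)/rho + 0.5*eps*g, u = v / (c0 + sqrt(c0*c0 + c1*|v|)).
// For eps == 1 this reduces to the standard LBM velocity (c0 = 0.5, c1 = 0).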
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double *Pressure){
int n;
// conserved momemnts
double rho,vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;
//double uu;
// non-conserved moments
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
// q=0
f0 = dist[n];
f1 = dist[2*Np+n];
f2 = dist[1*Np+n];
f3 = dist[4*Np+n];
f4 = dist[3*Np+n];
f5 = dist[6*Np+n];
f6 = dist[5*Np+n];
f7 = dist[8*Np+n];
f8 = dist[7*Np+n];
f9 = dist[10*Np+n];
f10 = dist[9*Np+n];
f11 = dist[12*Np+n];
f12 = dist[11*Np+n];
f13 = dist[14*Np+n];
f14 = dist[13*Np+n];
f15 = dist[16*Np+n];
f16 = dist[15*Np+n];
f17 = dist[18*Np+n];
f18 = dist[17*Np+n];
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17;
pressure = rho/porosity/3.0;
vx = (f1-f2+f7-f8+f9-f10+f11-f12+f13-f14)/rho+0.5*porosity*Gx;
vy = (f3-f4+f7-f8-f9+f10+f15-f16+f17-f18)/rho+0.5*porosity*Gy;
vz = (f5-f6+f11-f12-f13+f14+f15-f16-f17+f18)/rho+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = -porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx;
Fy = -porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy;
Fz = -porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz;
if (porosity==1.0){
Fx=Gx;
Fy=Gy;
Fz=Gz;
}
//------------------------ BGK collision where body force has higher-order terms ----------------------------------------------------------//
// // q=0
// dist[n] = f0*(1.0-rlx)+ rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// + 0.3333333333333333*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 1
// dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q=2
// dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 3
// dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 4
// dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 5
// dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(3. + (6.*uz)/porosity));
//
// // q = 6
// dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(-3. + (6.*uz)/porosity));
//
// // q = 7
// dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 8
// dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uy))/porosity) + Fy*(-3. - (9.*(-ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 9
// dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux - uy))/porosity) + Fy*(-3. - (9.*(ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 10
// dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(-ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 11
// dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(ux + uz))/porosity));
//
// // q = 12
// dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uz))/porosity) +
// Fz*(-3. - (9.*(-ux - uz))/porosity - (3.*uz)/porosity));
//
// // q = 13
// dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux - uz))/porosity) +
// Fz*(-3. - (9.*(ux - uz))/porosity - (3.*uz)/porosity));
//
// // q= 14
// dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-ux + uz))/porosity));
//
// // q = 15
// dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(uy + uz))/porosity));
//
// // q = 16
// dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy - uz))/porosity) +
// Fz*(-3. - (9.*(-uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 17
// dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy - uz))/porosity) +
// Fz*(-3. - (9.*(uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 18
// dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-uy + uz))/porosity));
//----------------------------------------------------------------------------------------------------------------------------------------//
//------------------------ BGK collision where body force has NO higher-order terms ----------------------------------------------------------//
// q=0
dist[n] = f0*(1.0-rlx)+ rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity);
// q = 1
dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3.));
// q=2
dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3.));
// q = 3
dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(3.));
// q = 4
dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(-3.));
// q = 5
dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(3.));
// q = 6
dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(-3.));
// q = 7
dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(3.));
// q = 8
dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(-3.));
// q = 9
dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(-3.));
// q = 10
dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(3.));
// q = 11
dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(3.));
// q = 12
dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(-3.));
// q = 13
dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(-3.));
// q= 14
dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(3.));
// q = 15
dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(3.));
// q = 16
dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(-3.));
// q = 17
dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(-3.));
// q = 18
dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(3.));
//-------------------------------------------------------------------------------------------------------------------------------------------//
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double *Pressure){
int n;
// conserved momemnts
double rho,vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;
//double uu;
// non-conserved moments
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int nr1,nr2,nr3,nr4,nr5,nr6,nr7,nr8,nr9,nr10,nr11,nr12,nr13,nr14,nr15,nr16,nr17,nr18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
// q=0
f0 = dist[n];
// q=1
nr1 = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist)
f1 = dist[nr1]; // reading the f1 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
f2 = dist[nr2]; // reading the f2 data into register fq
// q=3
nr3 = neighborList[n+2*Np]; // neighbor 4
f3 = dist[nr3];
// q = 4
nr4 = neighborList[n+3*Np]; // neighbor 3
f4 = dist[nr4];
// q=5
nr5 = neighborList[n+4*Np];
f5 = dist[nr5];
// q = 6
nr6 = neighborList[n+5*Np];
f6 = dist[nr6];
// q=7
nr7 = neighborList[n+6*Np];
f7 = dist[nr7];
// q = 8
nr8 = neighborList[n+7*Np];
f8 = dist[nr8];
// q=9
nr9 = neighborList[n+8*Np];
f9 = dist[nr9];
// q = 10
nr10 = neighborList[n+9*Np];
f10 = dist[nr10];
// q=11
nr11 = neighborList[n+10*Np];
f11 = dist[nr11];
// q=12
nr12 = neighborList[n+11*Np];
f12 = dist[nr12];
// q=13
nr13 = neighborList[n+12*Np];
f13 = dist[nr13];
// q=14
nr14 = neighborList[n+13*Np];
f14 = dist[nr14];
// q=15
nr15 = neighborList[n+14*Np];
f15 = dist[nr15];
// q=16
nr16 = neighborList[n+15*Np];
f16 = dist[nr16];
// q=17
//fq = dist[18*Np+n];
nr17 = neighborList[n+16*Np];
f17 = dist[nr17];
// q=18
nr18 = neighborList[n+17*Np];
f18 = dist[nr18];
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17;
pressure = rho/porosity/3.0;
vx = (f1-f2+f7-f8+f9-f10+f11-f12+f13-f14)/rho+0.5*porosity*Gx;
vy = (f3-f4+f7-f8-f9+f10+f15-f16+f17-f18)/rho+0.5*porosity*Gy;
vz = (f5-f6+f11-f12-f13+f14+f15-f16-f17+f18)/rho+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the body force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = -porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx;
Fy = -porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy;
Fz = -porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz;
if (porosity==1.0){
Fx=Gx;
Fy=Gy;
Fz=Gz;
}
//------------------------ BGK collision where body force has higher-order terms ----------------------------------------------------------//
// // q=0
// dist[n] = f0*(1.0-rlx) + rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// + 0.3333333333333333*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 1
// dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q=2
// dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 3
// dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 4
// dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 5
// dist[nr6] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(3. + (6.*uz)/porosity));
//
// // q = 6
// dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(-3. + (6.*uz)/porosity));
//
// // q = 7
// dist[nr8] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 8
// dist[nr7] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uy))/porosity) + Fy*(-3. - (9.*(-ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 9
// dist[nr10] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux - uy))/porosity) + Fy*(-3. - (9.*(ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 10
// dist[nr9] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(-ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 11
// dist[nr12] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(ux + uz))/porosity));
//
// // q = 12
// dist[nr11] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uz))/porosity) +
// Fz*(-3. - (9.*(-ux - uz))/porosity - (3.*uz)/porosity));
//
// // q = 13
// dist[nr14] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux - uz))/porosity) +
// Fz*(-3. - (9.*(ux - uz))/porosity - (3.*uz)/porosity));
//
// // q= 14
// dist[nr13] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-ux + uz))/porosity));
//
// // q = 15
// dist[nr16] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(uy + uz))/porosity));
//
// // q = 16
// dist[nr15] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy - uz))/porosity) +
// Fz*(-3. - (9.*(-uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 17
// dist[nr18] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy - uz))/porosity) +
// Fz*(-3. - (9.*(uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 18
// dist[nr17] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-uy + uz))/porosity));
//----------------------------------------------------------------------------------------------------------------------------------------//
//------------------------ BGK collision where body force has NO higher-order terms ----------------------------------------------------------//
// q=0
dist[n] = f0*(1.0-rlx) + rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity);
// q = 1
dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3.));
// q=2
dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3.));
// q = 3
dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(3.));
// q = 4
dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(-3.));
// q = 5
dist[nr6] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(3.));
// q = 6
dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(-3.));
// q = 7
dist[nr8] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(3.));
// q = 8
dist[nr7] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(-3.));
// q = 9
dist[nr10] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(-3.));
// q = 10
dist[nr9] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(3.));
// q = 11
dist[nr12] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(3.));
// q = 12
dist[nr11] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(-3.));
// q = 13
dist[nr14] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(-3.));
// q= 14
dist[nr13] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(3.));
// q = 15
dist[nr16] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(3.));
// q = 16
dist[nr15] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(-3.));
// q = 17
dist[nr18] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(-3.));
// q = 18
dist[nr17] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(3.));
//-------------------------------------------------------------------------------------------------------------------------------------------//
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
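// Note on the paired AAeven/AAodd kernels above: the even-step kernel reads each distribution
// from its opposite-direction slot of `dist` and writes the post-collision value back to the
// conventional slot, while the odd-step kernel gathers and scatters through `neighborList`.
// Together they implement the in-place (AA) streaming pattern, so only one copy of the
// distribution array is needed.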
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale_IMRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double Den, double *Pressure){
int n;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved momemnts
double jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
pressure = fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
fq = dist[1*Np+n];
pressure += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/Den+0.5*porosity*Gx;
vy = jy/Den+0.5*porosity*Gy;
vz = jz/Den+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = Den*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = Den*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = Den*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=Den*Gx;
Fy=Den*Gy;
Fz=Den*Gz;
}
//Calculate pressure for Incompressible-MRT model
pressure=0.5/porosity*(pressure-0.5*Den*u_mag*u_mag/porosity);
// //..............carry out relaxation process...............................................
// m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1)
// + (1-0.5*rlx_setA)*38*(Fx*ux+Fy*uy+Fz*uz)/porosity;
// m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2)
// + (1-0.5*rlx_setA)*11*(-Fx*ux-Fy*uy-Fz*uz)/porosity;
// jx = jx + Fx;
// m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
// jy = jy + Fy;
// m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
// jz = jz + Fz;
// m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
// m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9)
// + (1-0.5*rlx_setA)*(4*Fx*ux-2*Fy*uy-2*Fz*uz)/porosity;
// m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10)
// + (1-0.5*rlx_setA)*(-2*Fx*ux+Fy*uy+Fz*uz)/porosity;
// m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11)
// + (1-0.5*rlx_setA)*(2*Fy*uy-2*Fz*uz)/porosity;
// m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12)
// + (1-0.5*rlx_setA)*(-Fy*uy+Fz*uz)/porosity;
// m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13)
// + (1-0.5*rlx_setA)*(Fy*ux+Fx*uy)/porosity;
// m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14)
// + (1-0.5*rlx_setA)*(Fz*uy+Fy*uz)/porosity;
// m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15)
// + (1-0.5*rlx_setA)*(Fz*ux+Fx*uz)/porosity;
// m16 = m16 + rlx_setB*( - m16);
// m17 = m17 + rlx_setB*( - m17);
// m18 = m18 + rlx_setB*( - m18);
// //.......................................................................................................
//-------------------- IMRT collision where body force has NO higher-order terms -------------//
//..............carry out relaxation process...............................................
m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1);
m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9);
m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11);
m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13);
m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14);
m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*Den-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
dist[18*Np+n] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale_IMRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double Den, double *Pressure){
int n, nread;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved moments
double jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
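// rlx_setB is the relaxation rate applied to the remaining moments m4, m6, m8 and m16-m18
// mrt_V1..mrt_V12 below are the constant coefficients of the inverse (moment-to-distribution) D3Q19 MRT transform, e.g. mrt_V1 = 1/19, mrt_V8 = 1/12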
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
nread = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist)
fq = dist[nread]; // reading the f1 data into register fq
pressure = fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// q=2
nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
fq = dist[nread]; // reading the f2 data into register fq
pressure += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
nread = neighborList[n+2*Np]; // neighbor 4
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
nread = neighborList[n+3*Np]; // neighbor 3
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
nread = neighborList[n+4*Np];
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
nread = neighborList[n+5*Np];
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
nread = neighborList[n+6*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
nread = neighborList[n+7*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
nread = neighborList[n+8*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
nread = neighborList[n+9*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
nread = neighborList[n+10*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
nread = neighborList[n+11*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
nread = neighborList[n+12*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
nread = neighborList[n+13*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
nread = neighborList[n+14*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
nread = neighborList[n+15*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
nread = neighborList[n+16*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
nread = neighborList[n+17*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
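// c0 and c1 lump the linear (Darcy, ~mu_eff/perm) and nonlinear (Forchheimer, ~GeoFun/sqrt(perm)) resistances of the grey voxel into Guo's velocity relation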
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
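// Guo forcing: the provisional velocity v picks up half of the porosity-weighted body force,
// and the macroscopic velocity then follows from the quadratic relation u = v/(c0 + sqrt(c0*c0 + c1*|v|))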
vx = jx/Den+0.5*porosity*Gx;
vy = jy/Den+0.5*porosity*Gy;
vz = jz/Den+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = Den*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = Den*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = Den*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=Den*Gx;
Fy=Den*Gy;
Fz=Den*Gz;
}
//Calculate pressure for Incompressible-MRT model
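// at this point 'pressure' holds the accumulated sum of the non-rest distributions f1..f18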
pressure=0.5/porosity*(pressure-0.5*Den*u_mag*u_mag/porosity);
// //..............carry out relaxation process...............................................
// m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1)
// + (1-0.5*rlx_setA)*38*(Fx*ux+Fy*uy+Fz*uz)/porosity;
// m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2)
// + (1-0.5*rlx_setA)*11*(-Fx*ux-Fy*uy-Fz*uz)/porosity;
// jx = jx + Fx;
// m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
// jy = jy + Fy;
// m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
// jz = jz + Fz;
// m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
// m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9)
// + (1-0.5*rlx_setA)*(4*Fx*ux-2*Fy*uy-2*Fz*uz)/porosity;
// m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10)
// + (1-0.5*rlx_setA)*(-2*Fx*ux+Fy*uy+Fz*uz)/porosity;
// m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11)
// + (1-0.5*rlx_setA)*(2*Fy*uy-2*Fz*uz)/porosity;
// m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12)
// + (1-0.5*rlx_setA)*(-Fy*uy+Fz*uz)/porosity;
// m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13)
// + (1-0.5*rlx_setA)*(Fy*ux+Fx*uy)/porosity;
// m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14)
// + (1-0.5*rlx_setA)*(Fz*uy+Fy*uz)/porosity;
// m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15)
// + (1-0.5*rlx_setA)*(Fz*ux+Fx*uz)/porosity;
// m16 = m16 + rlx_setB*( - m16);
// m17 = m17 + rlx_setB*( - m17);
// m18 = m18 + rlx_setB*( - m18);
// //.......................................................................................................
//-------------------- IMRT collision where body force has NO higher-order terms -------------//
//..............carry out relaxation process...............................................
m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1);
m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9);
m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11);
m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13);
m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14);
m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*Den-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
nread = neighborList[n+Np];
dist[nread] = fq;
// q=2
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
nread = neighborList[n];
dist[nread] = fq;
// q = 3
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
nread = neighborList[n+3*Np];
dist[nread] = fq;
// q = 4
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
nread = neighborList[n+2*Np];
dist[nread] = fq;
// q = 5
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
nread = neighborList[n+5*Np];
dist[nread] = fq;
// q = 6
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
nread = neighborList[n+4*Np];
dist[nread] = fq;
// q = 7
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
nread = neighborList[n+7*Np];
dist[nread] = fq;
// q = 8
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
nread = neighborList[n+6*Np];
dist[nread] = fq;
// q = 9
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
nread = neighborList[n+9*Np];
dist[nread] = fq;
// q = 10
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
nread = neighborList[n+8*Np];
dist[nread] = fq;
// q = 11
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
nread = neighborList[n+11*Np];
dist[nread] = fq;
// q = 12
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
nread = neighborList[n+10*Np];
dist[nread]= fq;
// q = 13
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
nread = neighborList[n+13*Np];
dist[nread] = fq;
// q= 14
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
nread = neighborList[n+12*Np];
dist[nread] = fq;
// q = 15
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
nread = neighborList[n+15*Np];
dist[nread] = fq;
// q = 16
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
nread = neighborList[n+14*Np];
dist[nread] = fq;
// q = 17
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
nread = neighborList[n+17*Np];
dist[nread] = fq;
// q = 18
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
nread = neighborList[n+16*Np];
dist[nread] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale_MRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double rho0, double *Pressure){
int n, nread;
int nr1,nr2,nr3,nr4,nr5,nr6;
int nr7,nr8,nr9,nr10;
int nr11,nr12,nr13,nr14;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved moments
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
//nread = neighborList[n]; // neighbor 2
//fq = dist[nread]; // reading the f1 data into register fq
nr1 = neighborList[n];
fq = dist[nr1]; // reading the f1 data into register fq
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
//nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
//fq = dist[nread]; // reading the f2 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
fq = dist[nr2]; // reading the f2 data into register fq
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
//nread = neighborList[n+2*Np]; // neighbor 4
//fq = dist[nread];
nr3 = neighborList[n+2*Np]; // neighbor 4
fq = dist[nr3];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
//nread = neighborList[n+3*Np]; // neighbor 3
//fq = dist[nread];
nr4 = neighborList[n+3*Np]; // neighbor 3
fq = dist[nr4];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
//nread = neighborList[n+4*Np];
//fq = dist[nread];
nr5 = neighborList[n+4*Np];
fq = dist[nr5];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
//nread = neighborList[n+5*Np];
//fq = dist[nread];
nr6 = neighborList[n+5*Np];
fq = dist[nr6];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
//nread = neighborList[n+6*Np];
//fq = dist[nread];
nr7 = neighborList[n+6*Np];
fq = dist[nr7];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
//nread = neighborList[n+7*Np];
//fq = dist[nread];
nr8 = neighborList[n+7*Np];
fq = dist[nr8];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
//nread = neighborList[n+8*Np];
//fq = dist[nread];
nr9 = neighborList[n+8*Np];
fq = dist[nr9];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
//nread = neighborList[n+9*Np];
//fq = dist[nread];
nr10 = neighborList[n+9*Np];
fq = dist[nr10];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
//nread = neighborList[n+10*Np];
//fq = dist[nread];
nr11 = neighborList[n+10*Np];
fq = dist[nr11];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
//nread = neighborList[n+11*Np];
//fq = dist[nread];
nr12 = neighborList[n+11*Np];
fq = dist[nr12];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
//nread = neighborList[n+12*Np];
//fq = dist[nread];
nr13 = neighborList[n+12*Np];
fq = dist[nr13];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
//nread = neighborList[n+13*Np];
//fq = dist[nread];
nr14 = neighborList[n+13*Np];
fq = dist[nr14];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
nread = neighborList[n+14*Np];
fq = dist[nread];
//fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
nread = neighborList[n+15*Np];
fq = dist[nread];
//fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
//fq = dist[18*Np+n];
nread = neighborList[n+16*Np];
fq = dist[nread];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
nread = neighborList[n+17*Np];
fq = dist[nread];
//fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/rho0+0.5*porosity*Gx;
vy = jy/rho0+0.5*porosity*Gy;
vz = jz/rho0+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = rho0*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = rho0*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = rho0*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=rho0*Gx;
Fy=rho0*Gy;
Fz=rho0*Gz;
}
//Calculate pressure for MRT model
pressure=rho/3.f;
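// ideal-gas equation of state: p = cs^2*rho with cs^2 = 1/3 in lattice units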
//-------------------- MRT collision where body force has NO higher-order terms -------------//
m1 = m1 + rlx_setA*((19*(ux*ux+uy*uy+uz*uz)*rho0/porosity - 11*rho) - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(ux*ux+uy*uy+uz*uz)*rho0/porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*rho0)- m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*rho0)- m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*rho0)- m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*(((2*ux*ux-uy*uy-uz*uz)*rho0/porosity) - m9);
m10 = m10 + rlx_setA*( - m10);
//m10 = m10 + rlx_setA*(-0.5*rho0*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*(((uy*uy-uz*uz)*rho0/porosity) - m11);
m12 = m12 + rlx_setA*( - m12);
//m12 = m12 + rlx_setA*(-0.5*(rho0*(uy*uy-uz*uz)/porosity)- m12);
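// in this variant m10 and m12 simply relax toward zero; the commented lines above keep the alternative (velocity-dependent) equilibria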
m13 = m13 + rlx_setA*( (ux*uy*rho0/porosity) - m13);
m14 = m14 + rlx_setA*( (uy*uz*rho0/porosity) - m14);
m15 = m15 + rlx_setA*( (ux*uz*rho0/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
//nread = neighborList[n+Np];
dist[nr2] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
//nread = neighborList[n];
dist[nr1] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
//nread = neighborList[n+3*Np];
dist[nr4] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
//nread = neighborList[n+2*Np];
dist[nr3] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
//nread = neighborList[n+5*Np];
dist[nr6] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
//nread = neighborList[n+4*Np];
dist[nr5] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
//nread = neighborList[n+7*Np];
dist[nr8] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
//nread = neighborList[n+6*Np];
dist[nr7] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
//nread = neighborList[n+9*Np];
dist[nr10] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
//nread = neighborList[n+8*Np];
dist[nr9] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
//nread = neighborList[n+11*Np];
dist[nr12] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
//nread = neighborList[n+10*Np];
dist[nr11]= fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
//nread = neighborList[n+13*Np];
dist[nr14] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
//nread = neighborList[n+12*Np];
dist[nr13] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
nread = neighborList[n+15*Np];
dist[nread] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
nread = neighborList[n+14*Np];
dist[nread] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
nread = neighborList[n+17*Np];
dist[nread] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
nread = neighborList[n+16*Np];
dist[nread] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale_MRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double rho0, double *Pressure){
int n;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved moments
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
fq = dist[1*Np+n];
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/rho0+0.5*porosity*Gx;
vy = jy/rho0+0.5*porosity*Gy;
vz = jz/rho0+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = rho0*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = rho0*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = rho0*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=rho0*Gx;
Fy=rho0*Gy;
Fz=rho0*Gz;
}
//Calculate pressure for MRT model
pressure=rho/3.f;
//-------------------- MRT collision where body force has NO higher-order terms -------------//
m1 = m1 + rlx_setA*((19*(ux*ux+uy*uy+uz*uz)*rho0/porosity - 11*rho) - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(ux*ux+uy*uy+uz*uz)*rho0/porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*rho0)- m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*rho0)- m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*rho0)- m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*(((2*ux*ux-uy*uy-uz*uz)*rho0/porosity) - m9);
m10 = m10 + rlx_setA*( - m10);
//m10 = m10 + rlx_setA*(-0.5*rho0*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*(((uy*uy-uz*uz)*rho0/porosity) - m11);
m12 = m12 + rlx_setA*( - m12);
//m12 = m12 + rlx_setA*(-0.5*(rho0*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*( (ux*uy*rho0/porosity) - m13);
m14 = m14 + rlx_setA*( (uy*uz*rho0/porosity) - m14);
m15 = m15 + rlx_setA*( (ux*uz*rho0/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
dist[18*Np+n] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_GreyIMRT_Init(double *dist, int Np, double Den)
{
int n;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x;
if (n<Np ){
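// zero-velocity initialization: the rest population carries Den - 2/3 so that all 19 populations sum to Den
// (the remaining weights are 1/18 for the face directions and 1/36 for the edge directions)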
dist[n] = Den - 0.6666666666666667;
dist[Np+n] = 0.055555555555555555; //double(100*n)+1.f;
dist[2*Np+n] = 0.055555555555555555; //double(100*n)+2.f;
dist[3*Np+n] = 0.055555555555555555; //double(100*n)+3.f;
dist[4*Np+n] = 0.055555555555555555; //double(100*n)+4.f;
dist[5*Np+n] = 0.055555555555555555; //double(100*n)+5.f;
dist[6*Np+n] = 0.055555555555555555; //double(100*n)+6.f;
dist[7*Np+n] = 0.0277777777777778; //double(100*n)+7.f;
dist[8*Np+n] = 0.0277777777777778; //double(100*n)+8.f;
dist[9*Np+n] = 0.0277777777777778; //double(100*n)+9.f;
dist[10*Np+n] = 0.0277777777777778; //double(100*n)+10.f;
dist[11*Np+n] = 0.0277777777777778; //double(100*n)+11.f;
dist[12*Np+n] = 0.0277777777777778; //double(100*n)+12.f;
dist[13*Np+n] = 0.0277777777777778; //double(100*n)+13.f;
dist[14*Np+n] = 0.0277777777777778; //double(100*n)+14.f;
dist[15*Np+n] = 0.0277777777777778; //double(100*n)+15.f;
dist[16*Np+n] = 0.0277777777777778; //double(100*n)+16.f;
dist[17*Np+n] = 0.0277777777777778; //double(100*n)+17.f;
dist[18*Np+n] = 0.0277777777777778; //double(100*n)+18.f;
}
}
}
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double *Pressure){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAeven_Greyscale), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Pressure);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale: %s \n",hipGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double *Pressure){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAodd_Greyscale), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Pressure);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale: %s \n",hipGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale_IMRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double Den,double *Pressure){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAeven_Greyscale_IMRT), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Den,Pressure);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale_IMRT: %s \n",hipGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale_IMRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double Den,double *Pressure){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAodd_Greyscale_IMRT), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Den,Pressure);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale_IMRT: %s \n",hipGetErrorString(err));
}
}
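// The AAodd/AAeven kernel pair implements the in-place "AA" update pattern: odd steps read and
// write the distributions through the neighbor list, even steps use the directly swapped slots.
// A minimal host-side sketch (hypothetical variable names; assumes the usual ScaLBL setup has
// already produced NeighborList, dist, Poros, Perm, Velocity and Pressure on the device):
//
// ScaLBL_D3Q19_GreyIMRT_Init(dist, Np, Den);
// for (int t = 0; t < timestepMax; t += 2) {
//     ScaLBL_D3Q19_AAodd_Greyscale_IMRT(NeighborList, dist, 0, Np, Np, rlx, rlx_eff,
//                                       Fx, Fy, Fz, Poros, Perm, Velocity, Den, Pressure);
//     ScaLBL_D3Q19_AAeven_Greyscale_IMRT(dist, 0, Np, Np, rlx, rlx_eff,
//                                        Fx, Fy, Fz, Poros, Perm, Velocity, Den, Pressure);
// }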
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale_MRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double rho0,double *Pressure){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAodd_Greyscale_MRT), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,rho0,Pressure);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale_MRT: %s \n",hipGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale_MRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double rho0,double *Pressure){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAeven_Greyscale_MRT), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,rho0,Pressure);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale_MRT: %s \n",hipGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_GreyIMRT_Init(double *dist, int Np, double Den){
hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_GreyIMRT_Init), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, dist, Np, Den);
hipError_t err = hipGetLastError();
if (hipSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_GreyIMRT_Init: %s \n",hipGetErrorString(err));
}
}
| 280bf5bcf66b23b73ee612071bf9412d73318d69.cu | /*
Copyright 2020 Equinor ASA
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#define NBLOCKS 1024
#define NTHREADS 256
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double *Pressure){
int n;
// conserved moments
double rho,vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;
//double uu;
// non-conserved moments
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
// q=0
f0 = dist[n];
f1 = dist[2*Np+n];
f2 = dist[1*Np+n];
f3 = dist[4*Np+n];
f4 = dist[3*Np+n];
f5 = dist[6*Np+n];
f6 = dist[5*Np+n];
f7 = dist[8*Np+n];
f8 = dist[7*Np+n];
f9 = dist[10*Np+n];
f10 = dist[9*Np+n];
f11 = dist[12*Np+n];
f12 = dist[11*Np+n];
f13 = dist[14*Np+n];
f14 = dist[13*Np+n];
f15 = dist[16*Np+n];
f16 = dist[15*Np+n];
f17 = dist[18*Np+n];
f18 = dist[17*Np+n];
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
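// density, pressure, and Guo-corrected velocity: the raw momentum picks up half of the
// porosity-weighted body force before the quadratic solve for (ux,uy,uz)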
rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17;
pressure = rho/porosity/3.0;
vx = (f1-f2+f7-f8+f9-f10+f11-f12+f13-f14)/rho+0.5*porosity*Gx;
vy = (f3-f4+f7-f8-f9+f10+f15-f16+f17-f18)/rho+0.5*porosity*Gy;
vz = (f5-f6+f11-f12-f13+f14+f15-f16-f17+f18)/rho+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = -porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx;
Fy = -porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy;
Fz = -porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz;
if (porosity==1.0){
Fx=Gx;
Fy=Gy;
Fz=Gz;
}
//------------------------ BGK collision where body force has higher-order terms ----------------------------------------------------------//
// // q=0
// dist[n] = f0*(1.0-rlx)+ rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// + 0.3333333333333333*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 1
// dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q=2
// dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 3
// dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 4
// dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 5
// dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(3. + (6.*uz)/porosity));
//
// // q = 6
// dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(-3. + (6.*uz)/porosity));
//
// // q = 7
// dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 8
// dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uy))/porosity) + Fy*(-3. - (9.*(-ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 9
// dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux - uy))/porosity) + Fy*(-3. - (9.*(ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 10
// dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(-ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 11
// dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(ux + uz))/porosity));
//
// // q = 12
// dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uz))/porosity) +
// Fz*(-3. - (9.*(-ux - uz))/porosity - (3.*uz)/porosity));
//
// // q = 13
// dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux - uz))/porosity) +
// Fz*(-3. - (9.*(ux - uz))/porosity - (3.*uz)/porosity));
//
// // q= 14
// dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-ux + uz))/porosity));
//
// // q = 15
// dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(uy + uz))/porosity));
//
// // q = 16
// dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy - uz))/porosity) +
// Fz*(-3. - (9.*(-uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 17
// dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy - uz))/porosity) +
// Fz*(-3. - (9.*(uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 18
// dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-uy + uz))/porosity));
//----------------------------------------------------------------------------------------------------------------------------------------//
//------------------------ BGK collision where body force has NO higher-order terms ----------------------------------------------------------//
// q=0
dist[n] = f0*(1.0-rlx)+ rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity);
// q = 1
dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3.));
// q=2
dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3.));
// q = 3
dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(3.));
// q = 4
dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(-3.));
// q = 5
dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(3.));
// q = 6
dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(-3.));
// q = 7
dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(3.));
// q = 8
dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(-3.));
// q = 9
dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(-3.));
// q = 10
dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(3.));
// q = 11
dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(3.));
// q = 12
dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(-3.));
// q = 13
dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(-3.));
// q= 14
dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(3.));
// q = 15
dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(3.));
// q = 16
dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(-3.));
// q = 17
dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(-3.));
// q = 18
dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(3.));
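// (The prefactors 0.3333333333333333, 0.05555555555555555 and 0.027777777777777776 used above are
//  the standard D3Q19 lattice weights w0 = 1/3, w1-6 = 1/18 and w7-18 = 1/36.)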
//-------------------------------------------------------------------------------------------------------------------------------------------//
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double *Pressure){
int n;
// conserved moments
double rho,vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;
//double uu;
// non-conserved moments
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int nr1,nr2,nr3,nr4,nr5,nr6,nr7,nr8,nr9,nr10,nr11,nr12,nr13,nr14,nr15,nr16,nr17,nr18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
int S = Np/NBLOCKS/NTHREADS + 1;
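// Work distribution: each of the NBLOCKS*NTHREADS threads visits S sites. Block b owns the
// contiguous chunk starting at start + b*S*blockDim.x, and within it thread t handles the
// indices start + b*S*blockDim.x + s*blockDim.x + t for s = 0..S-1 (guarded by n<finish below).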
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
// q=0
f0 = dist[n];
// q=1
nr1 = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist)
f1 = dist[nr1]; // reading the f1 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
f2 = dist[nr2]; // reading the f2 data into register fq
// q=3
nr3 = neighborList[n+2*Np]; // neighbor 4
f3 = dist[nr3];
// q = 4
nr4 = neighborList[n+3*Np]; // neighbor 3
f4 = dist[nr4];
// q=5
nr5 = neighborList[n+4*Np];
f5 = dist[nr5];
// q = 6
nr6 = neighborList[n+5*Np];
f6 = dist[nr6];
// q=7
nr7 = neighborList[n+6*Np];
f7 = dist[nr7];
// q = 8
nr8 = neighborList[n+7*Np];
f8 = dist[nr8];
// q=9
nr9 = neighborList[n+8*Np];
f9 = dist[nr9];
// q = 10
nr10 = neighborList[n+9*Np];
f10 = dist[nr10];
// q=11
nr11 = neighborList[n+10*Np];
f11 = dist[nr11];
// q=12
nr12 = neighborList[n+11*Np];
f12 = dist[nr12];
// q=13
nr13 = neighborList[n+12*Np];
f13 = dist[nr13];
// q=14
nr14 = neighborList[n+13*Np];
f14 = dist[nr14];
// q=15
nr15 = neighborList[n+14*Np];
f15 = dist[nr15];
// q=16
nr16 = neighborList[n+15*Np];
f16 = dist[nr16];
// q=17
//fq = dist[18*Np+n];
nr17 = neighborList[n+16*Np];
f17 = dist[nr17];
// q=18
nr18 = neighborList[n+17*Np];
f18 = dist[nr18];
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
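// Velocity update follows Guo and Zhao (PRE 66, 036304): with the half-forced temporary velocity
// v = j/rho + 0.5*porosity*G computed below, the macroscopic velocity is
//     u = v / ( c0 + sqrt(c0*c0 + c1*|v|) ),
// where c0 = 0.5*(1 + 0.5*porosity*mu_eff/perm) carries the linear (Darcy) drag and
// c1 = 0.5*porosity*GeoFun/sqrt(perm) with GeoFun = 1.75/sqrt(150*porosity^3) carries the
// nonlinear (Forchheimer) drag; for porosity==1 (open nodes) c0=0.5 and c1=0, i.e. u = v.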
rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17;
pressure = rho/porosity/3.0;
vx = (f1-f2+f7-f8+f9-f10+f11-f12+f13-f14)/rho+0.5*porosity*Gx;
vy = (f3-f4+f7-f8-f9+f10+f15-f16+f17-f18)/rho+0.5*porosity*Gy;
vz = (f5-f6+f11-f12-f13+f14+f15-f16-f17+f18)/rho+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the body force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = -porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx;
Fy = -porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy;
Fz = -porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz;
if (porosity==1.0){
Fx=Gx;
Fy=Gy;
Fz=Gz;
}
//------------------------ BGK collision where body force has higher-order terms ----------------------------------------------------------//
// // q=0
// dist[n] = f0*(1.0-rlx) + rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// + 0.3333333333333333*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 1
// dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q=2
// dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 3
// dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 4
// dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 5
// dist[nr6] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(3. + (6.*uz)/porosity));
//
// // q = 6
// dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(-3. + (6.*uz)/porosity));
//
// // q = 7
// dist[nr8] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 8
// dist[nr7] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uy))/porosity) + Fy*(-3. - (9.*(-ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 9
// dist[nr10] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux - uy))/porosity) + Fy*(-3. - (9.*(ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 10
// dist[nr9] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(-ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 11
// dist[nr12] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(ux + uz))/porosity));
//
// // q = 12
// dist[nr11] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uz))/porosity) +
// Fz*(-3. - (9.*(-ux - uz))/porosity - (3.*uz)/porosity));
//
// // q = 13
// dist[nr14] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux - uz))/porosity) +
// Fz*(-3. - (9.*(ux - uz))/porosity - (3.*uz)/porosity));
//
// // q= 14
// dist[nr13] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-ux + uz))/porosity));
//
// // q = 15
// dist[nr16] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(uy + uz))/porosity));
//
// // q = 16
// dist[nr15] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy - uz))/porosity) +
// Fz*(-3. - (9.*(-uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 17
// dist[nr18] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy - uz))/porosity) +
// Fz*(-3. - (9.*(uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 18
// dist[nr17] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-uy + uz))/porosity));
//----------------------------------------------------------------------------------------------------------------------------------------//
//------------------------ BGK collision where body force has NO higher-order terms ----------------------------------------------------------//
// q=0
dist[n] = f0*(1.0-rlx) + rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity);
// q = 1
dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3.));
// q=2
dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3.));
// q = 3
dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(3.));
// q = 4
dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(-3.));
// q = 5
dist[nr6] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(3.));
// q = 6
dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(-3.));
// q = 7
dist[nr8] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(3.));
// q = 8
dist[nr7] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(-3.));
// q = 9
dist[nr10] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(-3.));
// q = 10
dist[nr9] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(3.));
// q = 11
dist[nr12] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(3.));
// q = 12
dist[nr11] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(-3.));
// q = 13
dist[nr14] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(-3.));
// q= 14
dist[nr13] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(3.));
// q = 15
dist[nr16] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(3.));
// q = 16
dist[nr15] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(-3.));
// q = 17
dist[nr18] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(-3.));
// q = 18
dist[nr17] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(3.));
//-------------------------------------------------------------------------------------------------------------------------------------------//
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
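// As the kernel names suggest, the AAeven/AAodd pairs are meant to be called on alternating
// timesteps of the in-place (AA) streaming scheme: the even kernels address dist[q*Np+n] directly,
// while the odd kernels read and write through neighborList, so no second copy of dist is required.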
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale_IMRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double Den, double *Pressure){
int n;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved moments
double jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
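// rlx_setB is slaved to rlx_setA via the two-relaxation-time relation s2 = 8*(2-s1)/(8-s1), which
// fixes the "magic" combination (1/s1 - 1/2)*(1/s2 - 1/2) = 3/16 (bounce-back walls located midway
// between lattice nodes).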
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
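// The mrt_V* values are the fixed coefficients of the inverse D3Q19 MRT transform; as fractions they
// correspond to 1/19, 1/79.8, 1/21, 11/2394, 1/63, 1/18, 1/36, 1/12, 4/1197, 1/252, 1/72 and 1/24.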
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
pressure = fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// q=2
fq = dist[1*Np+n];
pressure += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/Den+0.5*porosity*Gx;
vy = jy/Den+0.5*porosity*Gy;
vz = jz/Den+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = Den*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = Den*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = Den*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=Den*Gx;
Fy=Den*Gy;
Fz=Den*Gz;
}
//Calculate pressure for Incompressible-MRT model
pressure=0.5/porosity*(pressure-0.5*Den*u_mag*u_mag/porosity);
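// i.e. p = (1/(2*porosity)) * ( sum_{q!=0} f_q - 0.5*Den*|u|^2/porosity ): the incompressible-MRT
// equation of state with the grey-scale porosity correction (the "pressure" accumulator above holds
// the sum of the non-rest distributions).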
// //..............carry out relaxation process...............................................
// m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1)
// + (1-0.5*rlx_setA)*38*(Fx*ux+Fy*uy+Fz*uz)/porosity;
// m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2)
// + (1-0.5*rlx_setA)*11*(-Fx*ux-Fy*uy-Fz*uz)/porosity;
// jx = jx + Fx;
// m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
// jy = jy + Fy;
// m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
// jz = jz + Fz;
// m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
// m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9)
// + (1-0.5*rlx_setA)*(4*Fx*ux-2*Fy*uy-2*Fz*uz)/porosity;
// m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10)
// + (1-0.5*rlx_setA)*(-2*Fx*ux+Fy*uy+Fz*uz)/porosity;
// m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11)
// + (1-0.5*rlx_setA)*(2*Fy*uy-2*Fz*uz)/porosity;
// m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12)
// + (1-0.5*rlx_setA)*(-Fy*uy+Fz*uz)/porosity;
// m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13)
// + (1-0.5*rlx_setA)*(Fy*ux+Fx*uy)/porosity;
// m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14)
// + (1-0.5*rlx_setA)*(Fz*uy+Fy*uz)/porosity;
// m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15)
// + (1-0.5*rlx_setA)*(Fz*ux+Fx*uz)/porosity;
// m16 = m16 + rlx_setB*( - m16);
// m17 = m17 + rlx_setB*( - m17);
// m18 = m18 + rlx_setB*( - m18);
// //.......................................................................................................
//-------------------- IMRT collision where body force has NO higher-order terms -------------//
//..............carry out relaxation process...............................................
m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1);
m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9);
m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11);
m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13);
m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14);
m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
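// Note: relative to the fully forced relaxation kept in the comment block above, only the momentum
// components (jx,jy,jz) and the energy-flux moments (m4,m6,m8) carry explicit force terms here; the
// remaining moments relax toward their equilibria without a forcing correction.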
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*Den-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
dist[18*Np+n] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale_IMRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double Den, double *Pressure){
int n, nread;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved moments
double jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
nread = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist)
fq = dist[nread]; // reading the f1 data into register fq
pressure = fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// q=2
nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
fq = dist[nread]; // reading the f2 data into register fq
pressure += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
nread = neighborList[n+2*Np]; // neighbor 4
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
nread = neighborList[n+3*Np]; // neighbor 3
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
nread = neighborList[n+4*Np];
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
nread = neighborList[n+5*Np];
fq = dist[nread];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
nread = neighborList[n+6*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
nread = neighborList[n+7*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
nread = neighborList[n+8*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
nread = neighborList[n+9*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
nread = neighborList[n+10*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
nread = neighborList[n+11*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
nread = neighborList[n+12*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
nread = neighborList[n+13*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
nread = neighborList[n+14*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
nread = neighborList[n+15*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
nread = neighborList[n+16*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
nread = neighborList[n+17*Np];
fq = dist[nread];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/Den+0.5*porosity*Gx;
vy = jy/Den+0.5*porosity*Gy;
vz = jz/Den+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = Den*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = Den*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = Den*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=Den*Gx;
Fy=Den*Gy;
Fz=Den*Gz;
}
//Calculate pressure for Incompressible-MRT model
pressure=0.5/porosity*(pressure-0.5*Den*u_mag*u_mag/porosity);
// //..............carry out relaxation process...............................................
// m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1)
// + (1-0.5*rlx_setA)*38*(Fx*ux+Fy*uy+Fz*uz)/porosity;
// m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2)
// + (1-0.5*rlx_setA)*11*(-Fx*ux-Fy*uy-Fz*uz)/porosity;
// jx = jx + Fx;
// m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
// jy = jy + Fy;
// m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
// jz = jz + Fz;
// m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
// m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9)
// + (1-0.5*rlx_setA)*(4*Fx*ux-2*Fy*uy-2*Fz*uz)/porosity;
// m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10)
// + (1-0.5*rlx_setA)*(-2*Fx*ux+Fy*uy+Fz*uz)/porosity;
// m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11)
// + (1-0.5*rlx_setA)*(2*Fy*uy-2*Fz*uz)/porosity;
// m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12)
// + (1-0.5*rlx_setA)*(-Fy*uy+Fz*uz)/porosity;
// m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13)
// + (1-0.5*rlx_setA)*(Fy*ux+Fx*uy)/porosity;
// m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14)
// + (1-0.5*rlx_setA)*(Fz*uy+Fy*uz)/porosity;
// m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15)
// + (1-0.5*rlx_setA)*(Fz*ux+Fx*uz)/porosity;
// m16 = m16 + rlx_setB*( - m16);
// m17 = m17 + rlx_setB*( - m17);
// m18 = m18 + rlx_setB*( - m18);
// //.......................................................................................................
//-------------------- IMRT collision where body force has NO higher-order terms -------------//
//..............carry out relaxation process...............................................
m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1);
m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9);
m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11);
m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13);
m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14);
m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*Den-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
nread = neighborList[n+Np];
dist[nread] = fq;
// q=2
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
nread = neighborList[n];
dist[nread] = fq;
// q = 3
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
nread = neighborList[n+3*Np];
dist[nread] = fq;
// q = 4
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
nread = neighborList[n+2*Np];
dist[nread] = fq;
// q = 5
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
nread = neighborList[n+5*Np];
dist[nread] = fq;
// q = 6
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
nread = neighborList[n+4*Np];
dist[nread] = fq;
// q = 7
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
nread = neighborList[n+7*Np];
dist[nread] = fq;
// q = 8
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
nread = neighborList[n+6*Np];
dist[nread] = fq;
// q = 9
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
nread = neighborList[n+9*Np];
dist[nread] = fq;
// q = 10
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
nread = neighborList[n+8*Np];
dist[nread] = fq;
// q = 11
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
nread = neighborList[n+11*Np];
dist[nread] = fq;
// q = 12
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
nread = neighborList[n+10*Np];
dist[nread]= fq;
// q = 13
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
nread = neighborList[n+13*Np];
dist[nread] = fq;
// q= 14
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
nread = neighborList[n+12*Np];
dist[nread] = fq;
// q = 15
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
nread = neighborList[n+15*Np];
dist[nread] = fq;
// q = 16
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
nread = neighborList[n+14*Np];
dist[nread] = fq;
// q = 17
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
nread = neighborList[n+17*Np];
dist[nread] = fq;
// q = 18
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
nread = neighborList[n+16*Np];
dist[nread] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
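// The *_IMRT kernels above use a constant density Den together with the incompressible pressure
// relation, whereas the *_MRT kernels below reconstruct rho from the distributions and use
// p = rho/3 (the constant rho0 only enters the velocity and body-force scaling).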
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale_MRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double rho0, double *Pressure){
int n, nread;
int nr1,nr2,nr3,nr4,nr5,nr6;
int nr7,nr8,nr9,nr10;
int nr11,nr12,nr13,nr14;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//p = rho/3 for this model
// conserved moments
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
//nread = neighborList[n]; // neighbor 2
//fq = dist[nread]; // reading the f1 data into register fq
nr1 = neighborList[n];
fq = dist[nr1]; // reading the f1 data into register fq
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// q=2
//nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
//fq = dist[nread]; // reading the f2 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
fq = dist[nr2]; // reading the f2 data into register fq
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
//nread = neighborList[n+2*Np]; // neighbor 4
//fq = dist[nread];
nr3 = neighborList[n+2*Np]; // neighbor 4
fq = dist[nr3];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
//nread = neighborList[n+3*Np]; // neighbor 3
//fq = dist[nread];
nr4 = neighborList[n+3*Np]; // neighbor 3
fq = dist[nr4];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
//nread = neighborList[n+4*Np];
//fq = dist[nread];
nr5 = neighborList[n+4*Np];
fq = dist[nr5];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
//nread = neighborList[n+5*Np];
//fq = dist[nread];
nr6 = neighborList[n+5*Np];
fq = dist[nr6];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
//nread = neighborList[n+6*Np];
//fq = dist[nread];
nr7 = neighborList[n+6*Np];
fq = dist[nr7];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
//nread = neighborList[n+7*Np];
//fq = dist[nread];
nr8 = neighborList[n+7*Np];
fq = dist[nr8];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
//nread = neighborList[n+8*Np];
//fq = dist[nread];
nr9 = neighborList[n+8*Np];
fq = dist[nr9];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
//nread = neighborList[n+9*Np];
//fq = dist[nread];
nr10 = neighborList[n+9*Np];
fq = dist[nr10];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
//nread = neighborList[n+10*Np];
//fq = dist[nread];
nr11 = neighborList[n+10*Np];
fq = dist[nr11];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
//nread = neighborList[n+11*Np];
//fq = dist[nread];
nr12 = neighborList[n+11*Np];
fq = dist[nr12];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
//nread = neighborList[n+12*Np];
//fq = dist[nread];
nr13 = neighborList[n+12*Np];
fq = dist[nr13];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
//nread = neighborList[n+13*Np];
//fq = dist[nread];
nr14 = neighborList[n+13*Np];
fq = dist[nr14];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
nread = neighborList[n+14*Np];
fq = dist[nread];
//fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
nread = neighborList[n+15*Np];
fq = dist[nread];
//fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
//fq = dist[18*Np+n];
nread = neighborList[n+16*Np];
fq = dist[nread];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
nread = neighborList[n+17*Np];
fq = dist[nread];
//fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/rho0+0.5*porosity*Gx;
vy = jy/rho0+0.5*porosity*Gy;
vz = jz/rho0+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = rho0*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = rho0*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = rho0*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=rho0*Gx;
Fy=rho0*Gy;
Fz=rho0*Gz;
}
//Calculate pressure for MRT model
pressure=rho/3.f;
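// p = cs^2 * rho with the D3Q19 lattice speed of sound cs^2 = 1/3.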
//-------------------- MRT collision where body force has NO higher-order terms -------------//
m1 = m1 + rlx_setA*((19*(ux*ux+uy*uy+uz*uz)*rho0/porosity - 11*rho) - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(ux*ux+uy*uy+uz*uz)*rho0/porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*rho0)- m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*rho0)- m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*rho0)- m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*(((2*ux*ux-uy*uy-uz*uz)*rho0/porosity) - m9);
m10 = m10 + rlx_setA*( - m10);
//m10 = m10 + rlx_setA*(-0.5*rho0*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*(((uy*uy-uz*uz)*rho0/porosity) - m11);
m12 = m12 + rlx_setA*( - m12);
//m12 = m12 + rlx_setA*(-0.5*(rho0*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*( (ux*uy*rho0/porosity) - m13);
m14 = m14 + rlx_setA*( (uy*uz*rho0/porosity) - m14);
m15 = m15 + rlx_setA*( (ux*uz*rho0/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
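// Note: m10 and m12 are relaxed toward zero here; the commented alternatives above keep equilibria
// equal to -0.5 times the corresponding m9 and m11 equilibria.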
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
//nread = neighborList[n+Np];
dist[nr2] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
//nread = neighborList[n];
dist[nr1] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
//nread = neighborList[n+3*Np];
dist[nr4] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
//nread = neighborList[n+2*Np];
dist[nr3] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
//nread = neighborList[n+5*Np];
dist[nr6] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
//nread = neighborList[n+4*Np];
dist[nr5] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
//nread = neighborList[n+7*Np];
dist[nr8] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
//nread = neighborList[n+6*Np];
dist[nr7] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
//nread = neighborList[n+9*Np];
dist[nr10] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
//nread = neighborList[n+8*Np];
dist[nr9] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
//nread = neighborList[n+11*Np];
dist[nr12] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
//nread = neighborList[n+10*Np];
dist[nr11]= fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
//nread = neighborList[n+13*Np];
dist[nr14] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
//nread = neighborList[n+12*Np];
dist[nr13] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
nread = neighborList[n+15*Np];
dist[nread] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
nread = neighborList[n+14*Np];
dist[nread] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
nread = neighborList[n+17*Np];
dist[nread] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
nread = neighborList[n+16*Np];
dist[nread] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale_MRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double rho0, double *Pressure){
int n;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//p = rho/3 for this model
// conserved moments
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// q=2
fq = dist[1*Np+n];
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
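// Grey-scale force model following Guo's PRE 66, 036304 (2002): c0 and c1 encode the
// linear (Darcy) and nonlinear (Forchheimer) resistance of the voxel, and the
// macroscopic velocity is recovered implicitly as u = v/(c0 + sqrt(c0*c0 + c1*|v|)).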
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/rho0+0.5*porosity*Gx;
vy = jy/rho0+0.5*porosity*Gy;
vz = jz/rho0+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = rho0*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = rho0*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = rho0*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=rho0*Gx;
Fy=rho0*Gy;
Fz=rho0*Gz;
}
//Calculate pressure for Incompressible-MRT model
pressure=rho/3.f;
//-------------------- IMRT collision where body force has NO higher-order terms -------------//
m1 = m1 + rlx_setA*((19*(ux*ux+uy*uy+uz*uz)*rho0/porosity - 11*rho) - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(ux*ux+uy*uy+uz*uz)*rho0/porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*rho0)- m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*rho0)- m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*rho0)- m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*(((2*ux*ux-uy*uy-uz*uz)*rho0/porosity) - m9);
m10 = m10 + rlx_setA*( - m10);
//m10 = m10 + rlx_setA*(-0.5*rho0*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*(((uy*uy-uz*uz)*rho0/porosity) - m11);
m12 = m12 + rlx_setA*( - m12);
//m12 = m12 + rlx_setA*(-0.5*(rho0*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*( (ux*uy*rho0/porosity) - m13);
m14 = m14 + rlx_setA*( (uy*uz*rho0/porosity) - m14);
m15 = m15 + rlx_setA*( (ux*uz*rho0/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
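// Map the relaxed moments back to distributions, f = M^{-1} m; the mrt_V* constants
// are entries of the inverse D3Q19 moment-transformation matrix.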
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
dist[18*Np+n] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
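// Initialize every site to the D3Q19 equilibrium at zero velocity: the q=0 entry
// stores Den - 2/3 while the remaining weights (1/18 for the face links, 1/36 for
// the edge links) bring the sum of all 19 distributions back to Den.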
__global__ void dvc_ScaLBL_D3Q19_GreyIMRT_Init(double *dist, int Np, double Den)
{
int n;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x;
if (n<Np ){
dist[n] = Den - 0.6666666666666667;
dist[Np+n] = 0.055555555555555555; //double(100*n)+1.f;
dist[2*Np+n] = 0.055555555555555555; //double(100*n)+2.f;
dist[3*Np+n] = 0.055555555555555555; //double(100*n)+3.f;
dist[4*Np+n] = 0.055555555555555555; //double(100*n)+4.f;
dist[5*Np+n] = 0.055555555555555555; //double(100*n)+5.f;
dist[6*Np+n] = 0.055555555555555555; //double(100*n)+6.f;
dist[7*Np+n] = 0.0277777777777778; //double(100*n)+7.f;
dist[8*Np+n] = 0.0277777777777778; //double(100*n)+8.f;
dist[9*Np+n] = 0.0277777777777778; //double(100*n)+9.f;
dist[10*Np+n] = 0.0277777777777778; //double(100*n)+10.f;
dist[11*Np+n] = 0.0277777777777778; //double(100*n)+11.f;
dist[12*Np+n] = 0.0277777777777778; //double(100*n)+12.f;
dist[13*Np+n] = 0.0277777777777778; //double(100*n)+13.f;
dist[14*Np+n] = 0.0277777777777778; //double(100*n)+14.f;
dist[15*Np+n] = 0.0277777777777778; //double(100*n)+15.f;
dist[16*Np+n] = 0.0277777777777778; //double(100*n)+16.f;
dist[17*Np+n] = 0.0277777777777778; //double(100*n)+17.f;
dist[18*Np+n] = 0.0277777777777778; //double(100*n)+18.f;
}
}
}
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double *Pressure){
dvc_ScaLBL_D3Q19_AAeven_Greyscale<<<NBLOCKS,NTHREADS >>>(dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Pressure);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale: %s \n",cudaGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double *Pressure){
dvc_ScaLBL_D3Q19_AAodd_Greyscale<<<NBLOCKS,NTHREADS >>>(neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Pressure);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale: %s \n",cudaGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale_IMRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double Den,double *Pressure){
dvc_ScaLBL_D3Q19_AAeven_Greyscale_IMRT<<<NBLOCKS,NTHREADS >>>(dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Den,Pressure);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale_IMRT: %s \n",cudaGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale_IMRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double Den,double *Pressure){
dvc_ScaLBL_D3Q19_AAodd_Greyscale_IMRT<<<NBLOCKS,NTHREADS >>>(neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Den,Pressure);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale_IMRT: %s \n",cudaGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale_MRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double rho0,double *Pressure){
dvc_ScaLBL_D3Q19_AAodd_Greyscale_MRT<<<NBLOCKS,NTHREADS >>>(neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,rho0,Pressure);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale_MRT: %s \n",cudaGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale_MRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double rho0,double *Pressure){
dvc_ScaLBL_D3Q19_AAeven_Greyscale_MRT<<<NBLOCKS,NTHREADS >>>(dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,rho0,Pressure);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale_MRT: %s \n",cudaGetErrorString(err));
}
}
extern "C" void ScaLBL_D3Q19_GreyIMRT_Init(double *dist, int Np, double Den){
dvc_ScaLBL_D3Q19_GreyIMRT_Init<<<NBLOCKS,NTHREADS >>>(dist, Np, Den);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_GreyIMRT_Init: %s \n",cudaGetErrorString(err));
}
}
|
5c692225d78d333c007f1a333a00fed042629633.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello_GPU(void){
int i = threadIdx.x;
printf("hello from GPU[%d]!\n",i);
}
int main(void){
printf("Hello, World - from CPU!\n");
hipLaunchKernelGGL(( hello_GPU), dim3(2),dim3(3), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 5c692225d78d333c007f1a333a00fed042629633.cu | #include <stdio.h>
__global__ void hello_GPU(void){
int i = threadIdx.x;
printf("hello from GPU[%d]!\n",i);
}
int main(void){
printf("Hello, World - from CPU!\n");
hello_GPU<<<2,3>>>();
cudaDeviceSynchronize();
return 0;
}
|
337d6942ee1d7a027cd342f897f78f91cf13f2dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(BitShift, LAYER_BITSHIFT);
__global__ void bitshift_kernel(const int* input, int* output, int count, int bits, int direction) {
CUDA_KERNEL_LOOP(index, count) {
if (direction == 0) {
output[index] = input[index] >> bits;
} else {
output[index] = input[index] << bits;
}
}
}
Status CudaBitShiftLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}
Status CudaBitShiftLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaBitShiftLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
auto layer_param = dynamic_cast<BitShiftLayerParam*>(param_);
CHECK_PARAM_NULL(layer_param);
auto input_data_type = inputs[0]->GetBlobDesc().data_type;
auto input_data = (int*)(inputs[0]->GetHandle().base);
auto output_data = (int *)(outputs[0]->GetHandle().base);
const int count = DimsVectorUtils::Count(inputs[0]->GetBlobDesc().dims);
hipLaunchKernelGGL(( bitshift_kernel), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(),
input_data, output_data, count, layer_param->bits, layer_param->direction);
return TNN_OK;
}
REGISTER_CUDA_ACC(BitShift, LAYER_BITSHIFT);
} // namespace TNN_NS
| 337d6942ee1d7a027cd342f897f78f91cf13f2dc.cu | // Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(BitShift, LAYER_BITSHIFT);
__global__ void bitshift_kernel(const int* input, int* output, int count, int bits, int direction) {
CUDA_KERNEL_LOOP(index, count) {
if (direction == 0) {
output[index] = input[index] >> bits;
} else {
output[index] = input[index] << bits;
}
}
}
Status CudaBitShiftLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);;
}
Status CudaBitShiftLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaBitShiftLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
auto layer_param = dynamic_cast<BitShiftLayerParam*>(param_);
CHECK_PARAM_NULL(layer_param);
auto input_data_type = inputs[0]->GetBlobDesc().data_type;
auto input_data = (int*)(inputs[0]->GetHandle().base);
auto output_data = (int *)(outputs[0]->GetHandle().base);
const int count = DimsVectorUtils::Count(inputs[0]->GetBlobDesc().dims);
bitshift_kernel<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>(
input_data, output_data, count, layer_param->bits, layer_param->direction);
return TNN_OK;
}
REGISTER_CUDA_ACC(BitShift, LAYER_BITSHIFT);
} // namespace TNN_NS
|
d74e041c7eb8126bbabdd8b35322c3df5f67a4af.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
// CPM extra code: if statement
if(top.size() >= 3){
// masks
top[2]->ReshapeLike(batch->missing_part_mask_);
// Copy the labels.
caffe_copy(batch->missing_part_mask_.count(), batch->missing_part_mask_.gpu_data(),
top[2]->mutable_gpu_data());
}
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
| d74e041c7eb8126bbabdd8b35322c3df5f67a4af.cu | #include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
// CPM extra code: if statement
if(top.size() >= 3){
// masks
top[2]->ReshapeLike(batch->missing_part_mask_);
// Copy the labels.
caffe_copy(batch->missing_part_mask_.count(), batch->missing_part_mask_.gpu_data(),
top[2]->mutable_gpu_data());
}
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
|
803d22d67687e3a64e8fed69f676a8692d1948fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex.h"
#include "stdio.h"
#include <string.h>
#include "rocblas.h"
#pragma comment(lib,"cublas.lib")
#define blocksize 32
#define THREAD_NUM 512
#define BLOCK_NUM 2048
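//Im2col for the transposed convolution: each thread scatters one element of the
//zero-padded, stride-dilated input into the column matrix Res_In, so that the
//deconvolution reduces to a single GEMM against the rearranged kernel.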
__global__ void Im2col(float *In,float *Res_In,int a,int b,int c,int d,int batchsize,int In_channel,int output_channel,\
int pad_needed_height,int pad_needed_width,int new_height,int new_width,int padheight,int padwidth)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
int i,k;
int ii,jj,pp,qq,t;
int index,flag;
int height=padheight-c+1;//note the difference
int width=padwidth-d+1;
int reh,rew,re1,re2;
for(int u=tid+bid*THREAD_NUM;u<c*d*output_channel*height*width*batchsize;u+= BLOCK_NUM*THREAD_NUM)
{
i=u/(height*width*batchsize);//which column
k=u%(height*width*batchsize);//which row
ii=k/(height*width);//which batch
jj=i/(c*d); //which output_channel
pp=k%(height*width);
qq=i%(c*d);
index=(pp/height)*padheight+pp%height+(qq/c)*padheight+qq%c;
reh=index%padheight-(pad_needed_height+1)/2;
rew=index/padheight-(pad_needed_width+1)/2;
re1=new_height+(new_height-1)*(a-1);
re2=new_width+(new_width-1)*(b-1);
if(reh<0||reh>=re1||rew<0||rew>=re2||(a>1&&(reh%a!=0))||(b>1&&(rew%b!=0)))
Res_In[u]=0;
else{
flag=reh-(reh/a)*(a-1)+new_height*(rew-(rew/b)*(b-1));
t=jj*new_height*new_width*batchsize+ii*new_height*new_width+flag;
Res_In[u]=In[t];
}
}
}
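//K2col: rotate each c x d filter by 180 degrees and reorder the channels so the
//transposed convolution can be evaluated as an ordinary correlation.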
__global__ void K2col(float *W,float *Res_W,int c,int d,int In_channel,int output_channel)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
int j,k,l,m,p,q,t;
for(int u=tid+bid*THREAD_NUM;u<c*d*In_channel*output_channel;u+= BLOCK_NUM*THREAD_NUM)
{
j=u/(c*d*output_channel);//how many In_channels come before it
k=u%(c*d*output_channel);
l=k/(c*d);//how many output_channels come before it
m=k%(c*d);
p=m%c;
q=m/c;
t=l*c*d*In_channel+j*c*d+c*d-(q*c+p)-1;
Res_W[u]=W[t];
}
}
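//padMatrix: copy the GEMM result into the requested output shape, zero-filling the
//rows/columns that VALID padding discarded in the forward pass.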
__global__ void padMatrix(float *output,float *output1,int batchsize,int In_channel,int dh,int dw,int height,int width)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
int i,j,index,t,p,q;
for(int u=tid+bid*THREAD_NUM;u<height*width*batchsize*In_channel;u+= BLOCK_NUM*THREAD_NUM)
{
i=u/(height*width);
j=u%(height*width);
p=j/height;
q=j%height;
if(q<(height-dh)&&p<(width-dw))
{t=i*(height-dh)*(width-dw)+p*(height-dh)+q;
output1[u]=output[t];}
else
output1[u]=0;
}
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{ /*output=deconv2d(input,w,strides,padding,outputshape)
%input=[new_height ,new_width ,batchsize ,output_channels]
%w=[filter_height , filter_width ,in_channels, output_channels]
%output=[height ,width ,batchsize ,in_channels]
a=strides(1);b=strides(2);c=size(w,1);d=size(w,2);*/
const size_t *dim_array = mxGetDimensions(prhs[0]);
int new_height=*dim_array,new_width=*(dim_array+1),batchsize=1,output_channel=1;
int number_of_dims = mxGetNumberOfDimensions(prhs[0]);
if(number_of_dims==3)
batchsize=*(dim_array+2);
if(number_of_dims==4)
{batchsize=*(dim_array+2);
output_channel=*(dim_array+3);}
const size_t *dim_array1 = mxGetDimensions(prhs[1]);
int c=*dim_array1,d=*(dim_array1+1),In_channel=1;
int number_of_dims1 = mxGetNumberOfDimensions(prhs[1]);
if(number_of_dims1!=2)
In_channel=*(dim_array1+2);
double *s;
s=mxGetPr(prhs[2]);
int a=int(*s),b=int(*(s+1));
char *padding=mxArrayToString(prhs[3]);
double *outputshape;
outputshape=mxGetPr(prhs[4]);
int height=int(*outputshape),width=int(*(outputshape+1));
float *A=(float*)mxGetPr(prhs[0]);
float *B=(float*)mxGetPr(prhs[1]);
int padheight,padwidth,pad_needed_height,pad_needed_width;
if(strcmp(padding,"SAME")==0)
{
padheight=height+c-1;
padwidth=width+d-1;
pad_needed_height=padheight-(new_height+(new_height-1)*(a-1));
pad_needed_width=padwidth-(new_width+(new_width-1)*(b-1));
}
if(strcmp(padding,"VALID")==0)
{
pad_needed_height=(c-1)*2;
pad_needed_width=(d-1)*2;
padheight=pad_needed_height+new_height+(new_height-1)*(a-1);
padwidth=pad_needed_width+new_width+(new_width-1)*(b-1);
}
float *In,*Res_In,*W,*output,*Res_W,*output1;
size_t size_1,size_2,size_3,size_4,size_5;
size_1=new_height*new_width*batchsize*output_channel*sizeof(float);
size_2=(padheight-c+1)*(padwidth-d+1)*batchsize*output_channel*c*d*sizeof(float);
size_3=c*d*In_channel*output_channel*sizeof(float);
size_4=(padheight-c+1)*(padwidth-d+1)*batchsize*In_channel*sizeof(float);
size_5=height*width*batchsize*In_channel*sizeof(float);
//rearrange the Input
hipMalloc((void**)&In,size_1);
hipMalloc((void**)&Res_In,size_2);
hipMemcpy(In,A , size_1, hipMemcpyHostToDevice);
Im2col<<<BLOCK_NUM,THREAD_NUM>>>(In,Res_In,a,b,c,d,batchsize,In_channel,output_channel,pad_needed_height,pad_needed_width,new_height,new_width,padheight,padwidth);
hipDeviceSynchronize();
hipFree(In);
//rearrange W
hipMalloc((void**)&W,size_3);
hipMalloc((void**)&Res_W,size_3);
hipMemcpy(W,B , size_3, hipMemcpyHostToDevice);
K2col<<<BLOCK_NUM,THREAD_NUM>>>(W,Res_W,c,d,In_channel,output_channel);
hipDeviceSynchronize();
hipFree(W);
/*
const size_t dim1[]={(padheight-c+1)*(padwidth-d+1)*batchsize,output_channel*c*d};
plhs[1] = mxCreateNumericArray(2,dim1 ,mxSINGLE_CLASS, mxREAL);
hipMemcpy((float*)mxGetPr(plhs[1]), Res_In, size_2, hipMemcpyDeviceToHost);
const size_t dim2[]={output_channel*c*d,In_channel};
plhs[2] = mxCreateNumericArray(2,dim2 ,mxSINGLE_CLASS, mxREAL);
hipMemcpy((float*)mxGetPr(plhs[2]), Res_W, size_3, hipMemcpyDeviceToHost);
*/
//matrix multiplication
hipMalloc((void**)&output,size_4);
int L_rows=(padheight-c+1)*(padwidth-d+1)*batchsize,L_cols=output_channel*c*d,R_cols=In_channel;
/*
dim3 dimBlock(blocksize, blocksize);
OutputMatrix<< <BLOCK_NUM,dimBlock>> >(Res_In,Res_W,output,L_rows,L_cols,R_cols);
*/
float alpha=1,beta=0;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,L_rows,R_cols,L_cols,&alpha,Res_In, L_rows,Res_W,L_cols,&beta,output,L_rows);
hipblasDestroy(handle);
//hipDeviceSynchronize();
hipFree(Res_In);
hipFree(Res_W);
//adjust for the part discarded by VALID padding in the forward pass
const size_t dim[]={height ,width,batchsize, In_channel};
plhs[0] = mxCreateNumericArray(4,dim ,mxSINGLE_CLASS, mxREAL);
if(height==(padheight-c+1)&&width==(padwidth-d+1))
{
hipMemcpy((float*)mxGetPr(plhs[0]), output, size_5, hipMemcpyDeviceToHost);
hipFree(output);
}
else
{
hipMalloc((void**)&output1,size_5);
padMatrix<<<BLOCK_NUM,THREAD_NUM>>>(output,output1,batchsize, In_channel,height-(padheight-c+1),width-(padwidth-d+1),height,width);
hipMemcpy((float*)mxGetPr(plhs[0]), output1, size_5, hipMemcpyDeviceToHost);
hipFree(output);
hipFree(output1);
}
}
| 803d22d67687e3a64e8fed69f676a8692d1948fd.cu | #include "mex.h"
#include "stdio.h"
#include <string.h>
#include "cublas_v2.h"
#pragma comment(lib,"cublas.lib")
#define blocksize 32
#define THREAD_NUM 512
#define BLOCK_NUM 2048
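//Im2col for the transposed convolution: each thread scatters one element of the
//zero-padded, stride-dilated input into the column matrix Res_In, so that the
//deconvolution reduces to a single GEMM against the rearranged kernel.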
__global__ void Im2col(float *In,float *Res_In,int a,int b,int c,int d,int batchsize,int In_channel,int output_channel,\
int pad_needed_height,int pad_needed_width,int new_height,int new_width,int padheight,int padwidth)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
int i,k;
int ii,jj,pp,qq,t;
int index,flag;
int height=padheight-c+1;//note the difference
int width=padwidth-d+1;
int reh,rew,re1,re2;
for(int u=tid+bid*THREAD_NUM;u<c*d*output_channel*height*width*batchsize;u+= BLOCK_NUM*THREAD_NUM)
{
i=u/(height*width*batchsize);//which column
k=u%(height*width*batchsize);//which row
ii=k/(height*width);//which batch
jj=i/(c*d); //which output_channel
pp=k%(height*width);
qq=i%(c*d);
index=(pp/height)*padheight+pp%height+(qq/c)*padheight+qq%c;
reh=index%padheight-(pad_needed_height+1)/2;
rew=index/padheight-(pad_needed_width+1)/2;
re1=new_height+(new_height-1)*(a-1);
re2=new_width+(new_width-1)*(b-1);
if(reh<0||reh>=re1||rew<0||rew>=re2||(a>1&&(reh%a!=0))||(b>1&&(rew%b!=0)))
Res_In[u]=0;
else{
flag=reh-(reh/a)*(a-1)+new_height*(rew-(rew/b)*(b-1));
t=jj*new_height*new_width*batchsize+ii*new_height*new_width+flag;
Res_In[u]=In[t];
}
}
}
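//K2col: rotate each c x d filter by 180 degrees and reorder the channels so the
//transposed convolution can be evaluated as an ordinary correlation.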
__global__ void K2col(float *W,float *Res_W,int c,int d,int In_channel,int output_channel)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
int j,k,l,m,p,q,t;
for(int u=tid+bid*THREAD_NUM;u<c*d*In_channel*output_channel;u+= BLOCK_NUM*THREAD_NUM)
{
j=u/(c*d*output_channel);//how many In_channels come before it
k=u%(c*d*output_channel);
l=k/(c*d);//how many output_channels come before it
m=k%(c*d);
p=m%c;
q=m/c;
t=l*c*d*In_channel+j*c*d+c*d-(q*c+p)-1;
Res_W[u]=W[t];
}
}
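//padMatrix: copy the GEMM result into the requested output shape, zero-filling the
//rows/columns that VALID padding discarded in the forward pass.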
__global__ void padMatrix(float *output,float *output1,int batchsize,int In_channel,int dh,int dw,int height,int width)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
int i,j,index,t,p,q;
for(int u=tid+bid*THREAD_NUM;u<height*width*batchsize*In_channel;u+= BLOCK_NUM*THREAD_NUM)
{
i=u/(height*width);
j=u%(height*width);
p=j/height;
q=j%height;
if(q<(height-dh)&&p<(width-dw))
{t=i*(height-dh)*(width-dw)+p*(height-dh)+q;
output1[u]=output[t];}
else
output1[u]=0;
}
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{ /*output=deconv2d(input,w,strides,padding,outputshape)
%input=[new_height ,new_width ,batchsize ,output_channels]
%w=[filter_height , filter_width ,in_channels, output_channels]
%output=[height ,width ,batchsize ,in_channels]
a=strides(1);b=strides(2);c=size(w,1);d=size(w,2);*/
const size_t *dim_array = mxGetDimensions(prhs[0]);
int new_height=*dim_array,new_width=*(dim_array+1),batchsize=1,output_channel=1;
int number_of_dims = mxGetNumberOfDimensions(prhs[0]);
if(number_of_dims==3)
batchsize=*(dim_array+2);
if(number_of_dims==4)
{batchsize=*(dim_array+2);
output_channel=*(dim_array+3);}
const size_t *dim_array1 = mxGetDimensions(prhs[1]);
int c=*dim_array1,d=*(dim_array1+1),In_channel=1;
int number_of_dims1 = mxGetNumberOfDimensions(prhs[1]);
if(number_of_dims1!=2)
In_channel=*(dim_array1+2);
double *s;
s=mxGetPr(prhs[2]);
int a=int(*s),b=int(*(s+1));
char *padding=mxArrayToString(prhs[3]);
double *outputshape;
outputshape=mxGetPr(prhs[4]);
int height=int(*outputshape),width=int(*(outputshape+1));
float *A=(float*)mxGetPr(prhs[0]);
float *B=(float*)mxGetPr(prhs[1]);
int padheight,padwidth,pad_needed_height,pad_needed_width;
if(strcmp(padding,"SAME")==0)
{
padheight=height+c-1;
padwidth=width+d-1;
pad_needed_height=padheight-(new_height+(new_height-1)*(a-1));
pad_needed_width=padwidth-(new_width+(new_width-1)*(b-1));
}
if(strcmp(padding,"VALID")==0)
{
pad_needed_height=(c-1)*2;
pad_needed_width=(d-1)*2;
padheight=pad_needed_height+new_height+(new_height-1)*(a-1);
padwidth=pad_needed_width+new_width+(new_width-1)*(b-1);
}
float *In,*Res_In,*W,*output,*Res_W,*output1;
size_t size_1,size_2,size_3,size_4,size_5;
size_1=new_height*new_width*batchsize*output_channel*sizeof(float);
size_2=(padheight-c+1)*(padwidth-d+1)*batchsize*output_channel*c*d*sizeof(float);
size_3=c*d*In_channel*output_channel*sizeof(float);
size_4=(padheight-c+1)*(padwidth-d+1)*batchsize*In_channel*sizeof(float);
size_5=height*width*batchsize*In_channel*sizeof(float);
//rearrange the Input
cudaMalloc((void**)&In,size_1);
cudaMalloc((void**)&Res_In,size_2);
cudaMemcpy(In,A , size_1, cudaMemcpyHostToDevice);
Im2col<<<BLOCK_NUM,THREAD_NUM>>>(In,Res_In,a,b,c,d,batchsize,In_channel,output_channel,pad_needed_height,pad_needed_width,new_height,new_width,padheight,padwidth);
cudaThreadSynchronize();
cudaFree(In);
//rearrange W
cudaMalloc((void**)&W,size_3);
cudaMalloc((void**)&Res_W,size_3);
cudaMemcpy(W,B , size_3, cudaMemcpyHostToDevice);
K2col<<<BLOCK_NUM,THREAD_NUM>>>(W,Res_W,c,d,In_channel,output_channel);
cudaThreadSynchronize();
cudaFree(W);
/*
const size_t dim1[]={(padheight-c+1)*(padwidth-d+1)*batchsize,output_channel*c*d};
plhs[1] = mxCreateNumericArray(2,dim1 ,mxSINGLE_CLASS, mxREAL);
cudaMemcpy((float*)mxGetPr(plhs[1]), Res_In, size_2, cudaMemcpyDeviceToHost);
const size_t dim2[]={output_channel*c*d,In_channel};
plhs[2] = mxCreateNumericArray(2,dim2 ,mxSINGLE_CLASS, mxREAL);
cudaMemcpy((float*)mxGetPr(plhs[2]), Res_W, size_3, cudaMemcpyDeviceToHost);
*/
//matrix multiplication
cudaMalloc((void**)&output,size_4);
int L_rows=(padheight-c+1)*(padwidth-d+1)*batchsize,L_cols=output_channel*c*d,R_cols=In_channel;
/*
dim3 dimBlock(blocksize, blocksize);
OutputMatrix<< <BLOCK_NUM,dimBlock>> >(Res_In,Res_W,output,L_rows,L_cols,R_cols);
*/
float alpha=1,beta=0;
cublasHandle_t handle;
cublasCreate(&handle);
cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,L_rows,R_cols,L_cols,&alpha,Res_In, L_rows,Res_W,L_cols,&beta,output,L_rows);
cublasDestroy(handle);
//cudaThreadSynchronize();
cudaFree(Res_In);
cudaFree(Res_W);
//adjust for the part discarded by VALID padding in the forward pass
const size_t dim[]={height ,width,batchsize, In_channel};
plhs[0] = mxCreateNumericArray(4,dim ,mxSINGLE_CLASS, mxREAL);
if(height==(padheight-c+1)&&width==(padwidth-d+1))
{
cudaMemcpy((float*)mxGetPr(plhs[0]), output, size_5, cudaMemcpyDeviceToHost);
cudaFree(output);
}
else
{
cudaMalloc((void**)&output1,size_5);
padMatrix<<<BLOCK_NUM,THREAD_NUM>>>(output,output1,batchsize, In_channel,height-(padheight-c+1),width-(padwidth-d+1),height,width);
cudaMemcpy((float*)mxGetPr(plhs[0]), output1, size_5, cudaMemcpyDeviceToHost);
cudaFree(output);
cudaFree(output1);
}
}
|
722fde820adb5fa173f2957fdd6799254522c761.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaSmult_kernel(unsigned int size, const float *x1, const float *x2, float *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} | 722fde820adb5fa173f2957fdd6799254522c761.cu | #include "includes.h"
__global__ void cudaSmult_kernel(unsigned int size, const float *x1, const float *x2, float *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} |
a20354ef131d8a5a13073e50ed6806d802ff06d3.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPairwise.hip"
#else
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
// Both tensors are empty
if(THTensor_(nElement)(self_) == 0) return true;
THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {});
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THCudaByteTensor_minall(state, buf);
THCudaByteTensor_free(state, buf);
return min != 0;
}
void THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitand only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(bitxor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitxor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(add)(state, self_, src_, value * alpha);
}
void THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(sub)(state, self_, src_, value * alpha);
}
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(value != ScalarConvert<int, scalar_t>::to(0), 3, "divide by zero");
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, value));
#elif defined(THC_REAL_IS_HALF)
return THError("lshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, -value));
#elif defined(THC_REAL_IS_HALF)
return THError("rshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
#endif
#endif
| a20354ef131d8a5a13073e50ed6806d802ff06d3.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPairwise.cu"
#else
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
// Both tensors are empty
if(THTensor_(nElement)(self_) == 0) return true;
THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {});
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THCudaByteTensor_minall(state, buf);
THCudaByteTensor_free(state, buf);
return min != 0;
}
void THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitand only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(bitxor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitxor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(add)(state, self_, src_, value * alpha);
}
void THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(sub)(state, self_, src_, value * alpha);
}
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(value != ScalarConvert<int, scalar_t>::to(0), 3, "divide by zero");
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, value));
#elif defined(THC_REAL_IS_HALF)
return THError("lshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, -value));
#elif defined(THC_REAL_IS_HALF)
return THError("rshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
#endif
|
53bf8b7aace7027f65832bc30a5d28435ab85b30.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
#include "../include/repeat2.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define NUM_OF_THREADS 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
// Device code
const int page_size = 4; // Scale stride and arrays by page size.
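// Pointer-chasing kernel: every iteration performs the dependent load j = my_array[j]
// (repeat1 presumably expands to one such load per invocation), so consecutive loads
// cannot overlap; the final index is written to duration[0] to keep the chain from
// being optimized away.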
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned * duration) {
unsigned sum_time = 0;
duration[0] = 0;
unsigned j=0;
unsigned LINESIZE= 1;
unsigned CACHESIZE= 4096;
unsigned LIMIT=0;
int m=0;
/*
// fill L1/L2 cache
for (int k=0; k<CACHESIZE; k+=LINESIZE){
m=k%array_length;
j+=my_array[m];
}
if (j>=array_length) j=0;
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
j=tid;
for (int k = 0; k < iterations; k++) {
repeat1(j = my_array[j];)
// repeat1024(j=*(unsigned int **)j
}
//my_array[array_length] = (unsigned int)j;
//my_array[array_length+1] = (unsigned int) sum_time;
duration[0] = j;
}
void parametric_measure_global(int N, int iterations, int stride) {
int i;
int j=0;
unsigned int * h_a;
unsigned int * d_a;
unsigned * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
// Don't die if too much memory was requested.
if (N > 650000000) { printf ("OOM.\n"); return; }
// allocate arrays on CPU
h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
// allocate arrays on GPU
hipMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
hipMalloc ((void **) &duration, sizeof(unsigned long long));
// initialize array elements on CPU with pointers into d_a.
int step = gcf (stride, N); // Optimization: Initialize fewer elements.
for (i = 0; i < N; i += step) {
// Device pointers are 32-bit on GT200.
for (j=0; j<NUM_OF_THREADS; j++)
h_a[i+j] = ((i + j + stride) % N);
}
for (j=0; j<NUM_OF_THREADS; j++)
h_a[N+j] = j;
h_a[N+NUM_OF_THREADS] = 0;
hipDeviceSynchronize ();
// copy array elements from CPU to GPU
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned int) * N, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
// Launch a multiple of 10 iterations of the same kernel and take the average to eliminate interconnect (TPCs) effects
for (int l=0; l <1; l++) {
// launch kernel
dim3 Db = dim3(NUM_OF_THREADS);
dim3 Dg = dim3(1,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
hipLaunchKernelGGL(( global_latency) , dim3(Dg), dim3(Db), 0, 0, d_a,N, iterations, duration);
//global_latency <<<Dg, Db>>> ();
hipDeviceSynchronize ();
hipError_t error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error is %s\n", hipGetErrorString(error_id));
}
// copy results from GPU to CPU
hipDeviceSynchronize ();
//hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned int) * (N+2), hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
latency_sum+=latency[0];
}
// free memory on GPU
hipFree(d_a);
hipFree(duration);
hipDeviceSynchronize ();
// free memory on CPU
free(h_a);
free(latency);
// return 0;
}
// Host code
int main() {
printf("Assuming page size is %d KB\n", page_size);
// we will measure latency of global memory
// One thread that accesses an array.
// loads are dependent on the previously loaded values
int N, iterations, stride;
// initialize upper bounds here
int stride_upper_bound;
printf("Global1: Global memory latency for 1 KB array and varying strides.\n");
printf(" stride (bytes), latency (clocks)\n");
N= 536870912;
iterations = 40;
stride_upper_bound = N;
stride= 4096;
//for (stride = 1; stride <= (stride_upper_bound) ; stride+=1) {
// printf (" %5d, ", stride*4);
parametric_measure_global(N, iterations, stride);
//}
return 0;
}
| 53bf8b7aace7027f65832bc30a5d28435ab85b30.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
#include "../include/repeat2.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define NUM_OF_THREADS 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
// Device code
const int page_size = 4; // Scale stride and arrays by page size.
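// Pointer-chase latency kernel: every load j = my_array[j] depends on the value returned by the
// previous load, so the loads serialize and their cost reflects global-memory load latency rather
// than throughput.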
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned * duration) {
unsigned sum_time = 0;
duration[0] = 0;
unsigned j=0;
unsigned LINESIZE= 1;
unsigned CACHESIZE= 4096;
unsigned LIMIT=0;
int m=0;
/*
// fill L1/L2 cache
for (int k=0; k<CACHESIZE; k+=LINESIZE){
m=k%array_length;
j+=my_array[m];
}
if (j>=array_length) j=0;
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
j=tid;
for (int k = 0; k < iterations; k++) {
repeat1(j = my_array[j];)
// repeat1024(j=*(unsigned int **)j
}
//my_array[array_length] = (unsigned int)j;
//my_array[array_length+1] = (unsigned int) sum_time;
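// Write back the final chase index so the dependent loads cannot be optimized away; note that
// no timing value is stored in `duration` by this kernel.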
duration[0] = j;
}
void parametric_measure_global(int N, int iterations, int stride) {
int i;
int j=0;
unsigned int * h_a;
unsigned int * d_a;
unsigned * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
// Don't die if too much memory was requested.
if (N > 650000000) { printf ("OOM.\n"); return; }
// allocate arrays on CPU
h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
// allocate arrays on GPU
cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2+NUM_OF_THREADS));
cudaMalloc ((void **) &duration, sizeof(unsigned long long));
// initialize array elements on CPU with pointers into d_a.
int step = gcf (stride, N); // Optimization: Initialize fewer elements.
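// The kernel chases j -> (j + stride) % N starting from j = tid, so the only indices ever
// dereferenced are a multiple of `step` plus a thread id; initializing NUM_OF_THREADS entries
// at every step-th offset therefore covers every element the chase can touch.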
for (i = 0; i < N; i += step) {
// Device pointers are 32-bit on GT200.
for (j=0; j<NUM_OF_THREADS; j++)
h_a[i+j] = ((i + j + stride) % N);
}
for (j=0; j<NUM_OF_THREADS; j++)
h_a[N+j] = j;
h_a[N+NUM_OF_THREADS] = 0;
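// The NUM_OF_THREADS + 1 tail entries appear to be leftovers from a variant that stored each
// thread's start index and result inside the array itself (see the commented-out writes in the
// kernel above); the cudaMemcpy below only transfers the first N elements, so they never reach
// the device.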
cudaThreadSynchronize ();
// copy array elements from CPU to GPU
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
// Launch one or more iterations of the same kernel and average the results to reduce interconnect (TPC) effects; currently only a single iteration is performed
for (int l=0; l <1; l++) {
// launch kernel
dim3 Db = dim3(NUM_OF_THREADS);
dim3 Dg = dim3(1,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
global_latency <<<Dg, Db>>>(d_a,N, iterations, duration);
//global_latency <<<Dg, Db>>> ();
cudaThreadSynchronize ();
cudaError_t error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error is %s\n", cudaGetErrorString(error_id));
}
// copy results from GPU to CPU
cudaThreadSynchronize ();
//cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned int) * (N+2), cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
latency_sum+=latency[0];
}
// free memory on GPU
cudaFree(d_a);
cudaFree(duration);
cudaThreadSynchronize ();
// free memory on CPU
free(h_a);
free(latency);
// return 0;
}
// Host code
int main() {
printf("Assuming page size is %d KB\n", page_size);
// we will measure latency of global memory
// NUM_OF_THREADS threads each walk a pointer chain through an array;
// every load depends on the previously loaded value
int N, iterations, stride;
// initialize upper bounds here
int stride_upper_bound;
printf("Global1: Global memory latency for 1 KB array and varying strides.\n");
printf(" stride (bytes), latency (clocks)\n");
N= 536870912;
iterations = 40;
stride_upper_bound = N;
stride= 4096;
//for (stride = 1; stride <= (stride_upper_bound) ; stride+=1) {
// printf (" %5d, ", stride*4);
parametric_measure_global(N, iterations, stride);
//}
return 0;
}
|
eb4b15a3820a2326137a9448a2c858498a6bb18c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "orc.hpp"
#include "orc_gpu.hpp"
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.hpp>
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>
#include <cudf/detail/timezone.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/pair.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <algorithm>
#include <iterator>
namespace cudf::io::detail::orc {
using namespace cudf::io::orc;
namespace {
/**
* @brief Keeps track of orc mapping and child column details.
*/
struct reader_column_meta {
// Mapping between column id in orc to processing order.
std::vector<std::vector<size_type>> orc_col_map;
// Number of rows in child columns.
std::vector<uint32_t> num_child_rows;
// Consists of parent column valid_map and null count.
std::vector<column_validity_info> parent_column_data;
std::vector<size_type> parent_column_index;
// Start row of child columns [stripe][column].
std::vector<uint32_t> child_start_row;
// Number of rows of child columns [stripe][column].
std::vector<uint32_t> num_child_rows_per_stripe;
struct row_group_meta {
uint32_t num_rows; // number of rows in a column in a row group
uint32_t start_row; // start row in a column in a row group
};
// Row group metadata [rowgroup][column].
std::vector<row_group_meta> rwgrp_meta;
};
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
explicit orc_stream_info(uint64_t offset_,
std::size_t dst_pos_,
uint32_t length_,
uint32_t stripe_idx_)
: offset(offset_), dst_pos(dst_pos_), length(length_), stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
std::size_t dst_pos; // offset in memory relative to start of compressed stripe data
std::size_t length; // length in file
uint32_t stripe_idx; // stripe index
};
/**
* @brief Function that populates column descriptors stream/chunk
*/
std::size_t gather_stream_info(std::size_t stripe_index,
orc::StripeInformation const* stripeinfo,
orc::StripeFooter const* stripefooter,
host_span<int const> orc2gdf,
host_span<orc::SchemaType const> types,
bool use_index,
bool apply_struct_map,
std::size_t* num_dictionary_entries,
std::vector<orc_stream_info>& stream_info,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks)
{
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
auto const get_stream_index_type = [](orc::StreamKind kind) {
switch (kind) {
case orc::DATA: return gpu::CI_DATA;
case orc::LENGTH:
case orc::SECONDARY: return gpu::CI_DATA2;
case orc::DICTIONARY_DATA: return gpu::CI_DICTIONARY;
case orc::PRESENT: return gpu::CI_PRESENT;
case orc::ROW_INDEX: return gpu::CI_INDEX;
default:
// Skip this stream as it's not strictly required
return gpu::CI_NUM_STREAMS;
}
};
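// Walk every stream in the stripe footer: accumulate source/destination byte offsets and, for
// streams that map to a selected column, record the stream's id and length in the matching
// column chunk descriptor.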
for (auto const& stream : stripefooter->streams) {
if (!stream.column_id || *stream.column_id >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto const column_id = *stream.column_id;
auto col = orc2gdf[column_id];
if (col == -1 and apply_struct_map) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
auto const schema_type = types[column_id];
if (not schema_type.subtypes.empty()) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (auto const& idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto& chunk = chunks[stripe_index][col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
auto& chunk = chunks[stripe_index][col];
auto const index_type = get_stream_index_type(stream.kind);
if (index_type < gpu::CI_NUM_STREAMS) {
chunk.strm_id[index_type] = stream_info.size();
chunk.strm_len[index_type] = stream.length;
// NOTE: skip_count field is temporarily used to track the presence of index streams
chunk.skip_count |= 1 << index_type;
if (index_type == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[column_id].dictionarySize;
*num_dictionary_entries += stripefooter->columns[column_id].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
/**
* @brief Decompresses the stripe data, at stream granularity.
*
* @param decompressor Block decompressor
* @param stripe_data List of source stripe column data
* @param stream_info List of stream to column mappings
* @param chunks Vector of list of column chunk descriptors
* @param row_groups Vector of list of row index descriptors
* @param num_stripes Number of stripes making up column chunks
* @param row_index_stride Distance between each row index
* @param use_base_stride Whether to use base stride obtained from meta or use the computed value
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Device buffer to decompressed page data
*/
rmm::device_buffer decompress_stripe_data(
OrcDecompressor const& decompressor,
host_span<rmm::device_buffer const> stripe_data,
host_span<orc_stream_info> stream_info,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
std::size_t num_stripes,
std::size_t row_index_stride,
bool use_base_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
cudf::detail::hostdevice_vector<gpu::CompressedStreamInfo> compinfo(
0, stream_info.size(), stream);
for (auto const& info : stream_info) {
compinfo.push_back(gpu::CompressedStreamInfo(
static_cast<uint8_t const*>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
compinfo.host_to_device_async(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor.GetBlockSize(),
decompressor.GetLog2MaxCompressionRatio(),
stream);
compinfo.device_to_host_sync(stream);
// Count the exact number of compressed blocks
std::size_t num_compressed_blocks = 0;
std::size_t num_uncompressed_blocks = 0;
std::size_t total_decomp_size = 0;
for (std::size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(
not((num_uncompressed_blocks + num_compressed_blocks > 0) and (total_decomp_size == 0)),
"Inconsistent info on compression blocks");
// Buffer needs to be padded.
// Required by `gpuDecodeOrcColumnData`.
rmm::device_buffer decomp_data(
cudf::util::round_up_safe(total_decomp_size, BUFFER_PADDING_MULTIPLE), stream);
if (decomp_data.is_empty()) { return decomp_data; }
rmm::device_uvector<device_span<uint8_t const>> inflate_in(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<device_span<uint8_t>> inflate_out(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<compression_result> inflate_res(num_compressed_blocks, stream);
thrust::fill(rmm::exec_policy(stream),
inflate_res.begin(),
inflate_res.end(),
compression_result{0, compression_status::FAILURE});
// Parse again to populate the decompression input/output buffers
std::size_t decomp_offset = 0;
uint32_t max_uncomp_block_size = 0;
uint32_t start_pos = 0;
auto start_pos_uncomp = (uint32_t)num_compressed_blocks;
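// Compressed blocks occupy the front of inflate_in/inflate_out; the control entries for
// uncompressed (stored) blocks are appended after them, starting at num_compressed_blocks.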
for (std::size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t*>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].dec_in_ctl = inflate_in.data() + start_pos;
compinfo[i].dec_out_ctl = inflate_out.data() + start_pos;
compinfo[i].dec_res = {inflate_res.data() + start_pos, compinfo[i].num_compressed_blocks};
compinfo[i].copy_in_ctl = inflate_in.data() + start_pos_uncomp;
compinfo[i].copy_out_ctl = inflate_out.data() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
max_uncomp_block_size =
std::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size);
}
compinfo.host_to_device_async(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor.GetBlockSize(),
decompressor.GetLog2MaxCompressionRatio(),
stream);
// Value for checking whether we decompress successfully.
// It doesn't need to be atomic as there is no race condition: we only write `true` if needed.
cudf::detail::hostdevice_vector<bool> any_block_failure(1, stream);
any_block_failure[0] = false;
any_block_failure.host_to_device_async(stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
device_span<device_span<uint8_t const>> inflate_in_view{inflate_in.data(),
num_compressed_blocks};
device_span<device_span<uint8_t>> inflate_out_view{inflate_out.data(), num_compressed_blocks};
switch (decompressor.compression()) {
case compression_type::ZLIB:
if (nvcomp::is_decompression_disabled(nvcomp::compression_type::DEFLATE)) {
gpuinflate(
inflate_in_view, inflate_out_view, inflate_res, gzip_header_included::NO, stream);
} else {
nvcomp::batched_decompress(nvcomp::compression_type::DEFLATE,
inflate_in_view,
inflate_out_view,
inflate_res,
max_uncomp_block_size,
total_decomp_size,
stream);
}
break;
case compression_type::SNAPPY:
if (nvcomp::is_decompression_disabled(nvcomp::compression_type::SNAPPY)) {
gpu_unsnap(inflate_in_view, inflate_out_view, inflate_res, stream);
} else {
nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY,
inflate_in_view,
inflate_out_view,
inflate_res,
max_uncomp_block_size,
total_decomp_size,
stream);
}
break;
case compression_type::ZSTD:
if (auto const reason = nvcomp::is_decompression_disabled(nvcomp::compression_type::ZSTD);
reason) {
CUDF_FAIL("Decompression error: " + reason.value());
}
nvcomp::batched_decompress(nvcomp::compression_type::ZSTD,
inflate_in_view,
inflate_out_view,
inflate_res,
max_uncomp_block_size,
total_decomp_size,
stream);
break;
default: CUDF_FAIL("Unexpected decompression dispatch"); break;
}
// Check if any block has failed to decompress.
// Not using `thrust::any` or `thrust::count_if` to defer stream sync.
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(std::size_t{0}),
thrust::make_counting_iterator(inflate_res.size()),
[results = inflate_res.begin(),
any_block_failure = any_block_failure.device_ptr()] __device__(auto const idx) {
if (results[idx].status != compression_status::SUCCESS) { *any_block_failure = true; }
});
}
if (num_uncompressed_blocks > 0) {
device_span<device_span<uint8_t const>> copy_in_view{inflate_in.data() + num_compressed_blocks,
num_uncompressed_blocks};
device_span<device_span<uint8_t>> copy_out_view{inflate_out.data() + num_compressed_blocks,
num_uncompressed_blocks};
gpu_copy_uncompressed_blocks(copy_in_view, copy_out_view, stream);
}
// Copy without stream sync, thus need to wait for stream sync below to access.
any_block_failure.device_to_host_async(stream);
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
compinfo.device_to_host_sync(stream); // This also sync stream for `any_block_failure`.
// We can check on host after stream synchronize
CUDF_EXPECTS(not any_block_failure[0], "Error during decompression");
auto const num_columns = chunks.size().second;
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
for (std::size_t i = 0; i < num_stripes; ++i) {
for (std::size_t j = 0; j < num_columns; ++j) {
auto& chunk = chunks[i][j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (row_groups.size().first) {
chunks.host_to_device_async(stream);
row_groups.host_to_device_async(stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
compinfo.device_ptr(),
chunks.base_device_ptr(),
num_columns,
num_stripes,
row_groups.size().first,
row_index_stride,
use_base_stride,
stream);
}
return decomp_data;
}
/**
* @brief Updates null mask of columns whose parent is a struct column.
*
 * If a struct column has null elements, those rows are skipped when its child columns are written
 * in ORC, so the missing null elements must be inserted back into the child column. Pyspark adds
 * another behavior: if the child column doesn't have any null elements, it is written without a
 * PRESENT stream, in which case the parent null mask needs to be copied to the child column.
*
* @param chunks Vector of list of column chunk descriptors
* @param out_buffers Output columns' device buffers
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource to use for device memory allocation
*/
void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
host_span<column_buffer> out_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_stripes = chunks.size().first;
auto const num_columns = chunks.size().second;
bool is_mask_updated = false;
for (std::size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) {
if (not is_mask_updated) {
chunks.device_to_host_sync(stream);
is_mask_updated = true;
}
auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base;
auto child_valid_map_base = out_buffers[col_idx].null_mask();
auto child_mask_len =
chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count;
auto parent_mask_len = chunks[0][col_idx].column_num_rows;
if (child_valid_map_base != nullptr) {
rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream);
// Copy indexes at which the parent has valid value.
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + parent_mask_len,
dst_idx.begin(),
[parent_valid_map_base] __device__(auto idx) {
return bit_is_set(parent_valid_map_base, idx);
});
auto merged_null_mask = cudf::detail::create_null_mask(
parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr);
auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data());
uint32_t* dst_idx_ptr = dst_idx.data();
// Copy child valid bits from child column to valid indexes, this will merge both child
// and parent null masks
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + dst_idx.size(),
[child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) {
if (bit_is_set(child_valid_map_base, idx)) {
cudf::set_bit(merged_mask, dst_idx_ptr[idx]);
};
});
out_buffers[col_idx].set_null_mask(std::move(merged_null_mask));
} else {
// Since child column doesn't have a mask, copy parent null mask
auto mask_size = bitmask_allocation_size_bytes(parent_mask_len);
out_buffers[col_idx].set_null_mask(
rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr));
}
}
}
if (is_mask_updated) {
// Update chunks with pointers to column data which might have been changed.
for (std::size_t stripe_idx = 0; stripe_idx < num_stripes; ++stripe_idx) {
for (std::size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.valid_map_base = out_buffers[col_idx].null_mask();
}
}
chunks.host_to_device_sync(stream);
}
}
/**
* @brief Converts the stripe column data and outputs to columns.
*
* @param num_dicts Number of dictionary entries required
* @param skip_rows Number of rows to offset from start
* @param row_index_stride Distance between each row index
* @param level Current nesting level being processed
* @param tz_table Local time to UTC conversion table
* @param chunks Vector of list of column chunk descriptors
* @param row_groups Vector of list of row index descriptors
* @param out_buffers Output columns' device buffers
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource to use for device memory allocation
*/
void decode_stream_data(std::size_t num_dicts,
std::size_t skip_rows,
std::size_t row_index_stride,
std::size_t level,
table_view const& tz_table,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
cudf::detail::device_2dspan<gpu::RowGroup> row_groups,
std::vector<column_buffer>& out_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_stripes = chunks.size().first;
auto const num_columns = chunks.size().second;
thrust::counting_iterator<int> col_idx_it(0);
thrust::counting_iterator<int> stripe_idx_it(0);
// Update chunks with pointers to column data
std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.column_data_base = out_buffers[col_idx].data();
chunk.valid_map_base = out_buffers[col_idx].null_mask();
});
});
// Allocate global dictionary for deserializing
rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);
chunks.host_to_device_sync(stream);
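// First decode pass: null masks and string dictionaries only; the column values themselves are
// decoded by DecodeOrcColumnData below.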
gpu::DecodeNullsAndStringDictionaries(
chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream);
if (level > 0) {
// Update nullmasks for children if parent was a struct and had null mask
update_null_mask(chunks, out_buffers, stream, mr);
}
auto const tz_table_dptr = table_device_view::create(tz_table, stream);
rmm::device_scalar<size_type> error_count(0, stream);
// Update the null map for child columns
gpu::DecodeOrcColumnData(chunks.base_device_ptr(),
global_dict.data(),
row_groups,
num_columns,
num_stripes,
skip_rows,
*tz_table_dptr,
row_groups.size().first,
row_index_stride,
level,
error_count.data(),
stream);
chunks.device_to_host_async(stream);
// `value` synchronizes
auto const num_errors = error_count.value(stream);
CUDF_EXPECTS(num_errors == 0, "ORC data decode failed");
std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) {
out_buffers[col_idx].null_count() =
std::accumulate(stripe_idx_it + 0,
stripe_idx_it + num_stripes,
0,
[&](auto null_count, auto const stripe_idx) {
return null_count + chunks[stripe_idx][col_idx].null_count;
});
});
}
/**
* @brief Compute the per-stripe prefix sum of null count, for each struct column in the current
* layer.
*/
void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks,
cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums,
rmm::cuda_stream_view stream)
{
auto const num_stripes = chunks.size().first;
if (num_stripes == 0) return;
auto const num_columns = chunks.size().second;
std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update;
for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) {
// Null counts sums are only needed for children of struct columns
if (chunks[0][col_idx].type_kind == STRUCT) {
prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]);
}
}
auto const d_prefix_sums_to_update = cudf::detail::make_device_uvector_async(
prefix_sums_to_update, stream, rmm::mr::get_current_device_resource());
thrust::for_each(rmm::exec_policy(stream),
d_prefix_sums_to_update.begin(),
d_prefix_sums_to_update.end(),
[chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__(
auto const& idx_psums) {
auto const col_idx = idx_psums.first;
auto const psums = idx_psums.second;
thrust::transform(
thrust::seq,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + psums.size(),
psums.begin(),
[&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; });
thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin());
});
// `prefix_sums_to_update` goes out of scope, copy has to be done before we return
stream.synchronize();
}
/**
* @brief Aggregate child metadata from parent column chunks.
*/
void aggregate_child_meta(std::size_t level,
cudf::io::orc::detail::column_hierarchy const& selected_columns,
cudf::detail::host_2dspan<gpu::ColumnDesc> chunks,
cudf::detail::host_2dspan<gpu::RowGroup> row_groups,
host_span<orc_column_meta const> list_col,
host_span<column_buffer> out_buffers,
reader_column_meta& col_meta)
{
auto const num_of_stripes = chunks.size().first;
auto const num_of_rowgroups = row_groups.size().first;
auto const num_child_cols = selected_columns.levels[level + 1].size();
auto const number_of_child_chunks = num_child_cols * num_of_stripes;
auto& num_child_rows = col_meta.num_child_rows;
auto& parent_column_data = col_meta.parent_column_data;
// Reset the meta to store child column details.
num_child_rows.resize(selected_columns.levels[level + 1].size());
std::fill(num_child_rows.begin(), num_child_rows.end(), 0);
parent_column_data.resize(number_of_child_chunks);
col_meta.parent_column_index.resize(number_of_child_chunks);
col_meta.child_start_row.resize(number_of_child_chunks);
col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks);
col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols);
auto child_start_row = cudf::detail::host_2dspan<uint32_t>(
col_meta.child_start_row.data(), num_of_stripes, num_child_cols);
auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>(
col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols);
auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>(
col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols);
int index = 0; // number of child columns processed so far
// For each parent column, update its child column meta for each stripe.
std::for_each(list_col.begin(), list_col.end(), [&](auto const p_col) {
auto const parent_col_idx = col_meta.orc_col_map[level][p_col.id];
auto start_row = 0;
auto processed_row_groups = 0;
for (std::size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) {
// Aggregate num_rows and start_row from processed parent columns per row groups
if (num_of_rowgroups) {
auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups;
auto processed_child_rows = 0;
for (std::size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups;
rowgroup_id++, processed_row_groups++) {
auto const child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
auto const child_col_idx = index + id;
rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows;
rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows;
}
processed_child_rows += child_rows;
}
}
// Aggregate start row, number of rows per chunk and total number of rows in a column
auto const child_rows = chunks[stripe_id][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
auto const child_col_idx = index + id;
num_child_rows[child_col_idx] += child_rows;
num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows;
// start row could be different for each column when there is nesting at each stripe level
child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row;
}
start_row += child_rows;
}
// Parent column null mask and null count would be required for child column
// to adjust its nullmask.
auto type = out_buffers[parent_col_idx].type.id();
auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count());
auto parent_valid_map = out_buffers[parent_col_idx].null_mask();
auto num_rows = out_buffers[parent_col_idx].size;
for (size_type id = 0; id < p_col.num_children; id++) {
auto const child_col_idx = index + id;
col_meta.parent_column_index[child_col_idx] = parent_col_idx;
if (type == type_id::STRUCT) {
parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count};
// Number of rows in child will remain same as parent in case of struct column
num_child_rows[child_col_idx] = num_rows;
} else {
parent_column_data[child_col_idx] = {nullptr, 0};
}
}
index += p_col.num_children;
});
}
/**
* @brief struct to store buffer data and size of list buffer
*/
struct list_buffer_data {
size_type* data;
size_type size;
};
// Generates offsets for list buffer from number of elements in a row.
void generate_offsets_for_list(host_span<list_buffer_data> buff_data, rmm::cuda_stream_view stream)
{
for (auto& list_data : buff_data) {
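// In-place exclusive scan: the per-row element counts become the list column's offsets.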
thrust::exclusive_scan(rmm::exec_policy_nosync(stream),
list_data.data,
list_data.data + list_data.size,
list_data.data);
}
}
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_cudf_type(orc::TypeKind kind,
bool use_np_dtypes,
type_id timestamp_type_id,
type_id decimal_type_id)
{
switch (kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL: return decimal_type_id;
// Need to update once cuDF plans to support map type
case orc::MAP:
case orc::LIST: return type_id::LIST;
case orc::STRUCT: return type_id::STRUCT;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Determines cuDF type of an ORC Decimal column.
*/
type_id to_cudf_decimal_type(host_span<std::string const> decimal128_columns,
cudf::io::orc::detail::aggregate_orc_metadata const& metadata,
int column_index)
{
if (metadata.get_col_type(column_index).kind != DECIMAL) { return type_id::EMPTY; }
if (std::find(decimal128_columns.begin(),
decimal128_columns.end(),
metadata.column_path(0, column_index)) != decimal128_columns.end()) {
return type_id::DECIMAL128;
}
auto const precision = metadata.get_col_type(column_index)
.precision.value_or(cuda::std::numeric_limits<int64_t>::digits10);
if (precision <= cuda::std::numeric_limits<int32_t>::digits10) { return type_id::DECIMAL32; }
if (precision <= cuda::std::numeric_limits<int64_t>::digits10) { return type_id::DECIMAL64; }
return type_id::DECIMAL128;
}
std::string get_map_child_col_name(std::size_t const idx) { return (idx == 0) ? "key" : "value"; }
/**
* @brief Create empty columns and respective schema information from the buffer.
*/
std::unique_ptr<column> create_empty_column(
size_type orc_col_id,
cudf::io::orc::detail::aggregate_orc_metadata const& metadata,
host_span<std::string const> decimal128_columns,
bool use_np_dtypes,
data_type timestamp_type,
column_name_info& schema_info,
rmm::cuda_stream_view stream)
{
schema_info.name = metadata.column_name(0, orc_col_id);
auto const kind = metadata.get_col_type(orc_col_id).kind;
auto const type = to_cudf_type(kind,
use_np_dtypes,
timestamp_type.id(),
to_cudf_decimal_type(decimal128_columns, metadata, orc_col_id));
switch (kind) {
case orc::LIST: {
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("");
return make_lists_column(0,
make_empty_column(type_id::INT32),
create_empty_column(metadata.get_col_type(orc_col_id).subtypes[0],
metadata,
decimal128_columns,
use_np_dtypes,
timestamp_type,
schema_info.children.back(),
stream),
0,
rmm::device_buffer{0, stream},
stream);
}
case orc::MAP: {
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("struct");
auto const child_column_ids = metadata.get_col_type(orc_col_id).subtypes;
auto& children_schema = schema_info.children.back().children;
std::vector<std::unique_ptr<column>> child_columns;
for (std::size_t idx = 0; idx < metadata.get_col_type(orc_col_id).subtypes.size(); idx++) {
children_schema.emplace_back("");
child_columns.push_back(create_empty_column(child_column_ids[idx],
metadata,
decimal128_columns,
use_np_dtypes,
timestamp_type,
schema_info.children.back().children.back(),
stream));
children_schema[idx].name = get_map_child_col_name(idx);
}
return make_lists_column(
0,
make_empty_column(type_id::INT32),
make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream),
0,
rmm::device_buffer{0, stream},
stream);
}
case orc::STRUCT: {
std::vector<std::unique_ptr<column>> child_columns;
for (auto const col : metadata.get_col_type(orc_col_id).subtypes) {
schema_info.children.emplace_back("");
child_columns.push_back(create_empty_column(col,
metadata,
decimal128_columns,
use_np_dtypes,
timestamp_type,
schema_info.children.back(),
stream));
}
return make_structs_column(
0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
}
case orc::DECIMAL: {
int32_t scale = 0;
if (type == type_id::DECIMAL32 or type == type_id::DECIMAL64 or type == type_id::DECIMAL128) {
scale = -static_cast<int32_t>(metadata.get_types()[orc_col_id].scale.value_or(0));
}
return make_empty_column(data_type(type, scale));
}
default: return make_empty_column(type);
}
}
/**
* @brief Assemble the buffer with child columns.
*/
column_buffer assemble_buffer(size_type orc_col_id,
std::size_t level,
reader_column_meta const& col_meta,
cudf::io::orc::detail::aggregate_orc_metadata const& metadata,
cudf::io::orc::detail::column_hierarchy const& selected_columns,
std::vector<std::vector<column_buffer>>& col_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const col_id = col_meta.orc_col_map[level][orc_col_id];
auto& col_buffer = col_buffers[level][col_id];
col_buffer.name = metadata.column_name(0, orc_col_id);
auto kind = metadata.get_col_type(orc_col_id).kind;
switch (kind) {
case orc::LIST:
case orc::STRUCT: {
auto const& children_indices = selected_columns.children.at(orc_col_id);
for (auto const child_id : children_indices) {
col_buffer.children.emplace_back(assemble_buffer(
child_id, level + 1, col_meta, metadata, selected_columns, col_buffers, stream, mr));
}
} break;
case orc::MAP: {
std::vector<column_buffer> child_col_buffers;
// Get child buffers
auto const& children_indices = selected_columns.children.at(orc_col_id);
for (std::size_t idx = 0; idx < children_indices.size(); idx++) {
auto const col = children_indices[idx];
child_col_buffers.emplace_back(assemble_buffer(
col, level + 1, col_meta, metadata, selected_columns, col_buffers, stream, mr));
child_col_buffers.back().name = get_map_child_col_name(idx);
}
// Create a struct buffer
auto num_rows = child_col_buffers[0].size;
auto struct_buffer =
column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, mr);
struct_buffer.children = std::move(child_col_buffers);
struct_buffer.name = "struct";
col_buffer.children.emplace_back(std::move(struct_buffer));
} break;
default: break;
}
return std::move(col_buffer);
}
} // namespace
reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
orc_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _stream(stream),
_mr(mr),
_sources(std::move(sources)),
_metadata{_sources, stream},
_selected_columns{_metadata.select_columns(options.get_columns())},
_timestamp_type{options.get_timestamp_type()},
_use_index{options.is_enabled_use_index()},
_use_np_dtypes{options.is_enabled_use_np_dtypes()},
_decimal128_columns{options.get_decimal128_columns()},
_col_meta{std::make_unique<reader_column_meta>()}
{
}
table_with_metadata reader::impl::read(uint64_t skip_rows,
std::optional<size_type> const& num_rows_opt,
std::vector<std::vector<size_type>> const& stripes)
{
// Selected columns at different levels of nesting are stored in different elements
// of `selected_columns`; thus, size == 1 means no nested columns
CUDF_EXPECTS(skip_rows == 0 or _selected_columns.num_levels() == 1,
"skip_rows is not supported by nested columns");
// There are no columns in the table
if (_selected_columns.num_levels() == 0) { return {std::make_unique<table>(), table_metadata{}}; }
std::vector<std::vector<column_buffer>> out_buffers(_selected_columns.num_levels());
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Copy user data to the output metadata.
std::transform(_metadata.per_file_metadata.cbegin(),
_metadata.per_file_metadata.cend(),
std::back_inserter(out_metadata.per_file_user_data),
[](auto& meta) {
std::unordered_map<std::string, std::string> kv_map;
std::transform(meta.ff.metadata.cbegin(),
meta.ff.metadata.cend(),
std::inserter(kv_map, kv_map.end()),
[](auto const& kv) {
return std::pair{kv.name, kv.value};
});
return kv_map;
});
out_metadata.user_data = {out_metadata.per_file_user_data[0].begin(),
out_metadata.per_file_user_data[0].end()};
// Select only stripes required (aka row groups)
auto const [rows_to_skip, rows_to_read, selected_stripes] =
_metadata.select_stripes(stripes, skip_rows, num_rows_opt, _stream);
// If no rows or stripes to read, return empty columns
if (rows_to_read == 0 || selected_stripes.empty()) {
std::transform(_selected_columns.levels[0].begin(),
_selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const col_meta) {
out_metadata.schema_info.emplace_back("");
return create_empty_column(col_meta.id,
_metadata,
_decimal128_columns,
_use_np_dtypes,
_timestamp_type,
out_metadata.schema_info.back(),
_stream);
});
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Set up table for converting timestamp columns from local to UTC time
auto const tz_table = [&, &selected_stripes = selected_stripes] {
auto const has_timestamp_column = std::any_of(
_selected_columns.levels.cbegin(), _selected_columns.levels.cend(), [&](auto const& col_lvl) {
return std::any_of(col_lvl.cbegin(), col_lvl.cend(), [&](auto const& col_meta) {
return _metadata.get_col_type(col_meta.id).kind == TypeKind::TIMESTAMP;
});
});
return has_timestamp_column
? cudf::detail::make_timezone_transition_table(
{}, selected_stripes[0].stripe_info[0].second->writerTimezone, _stream)
: std::make_unique<cudf::table>();
}();
std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(_selected_columns.num_levels());
std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums;
// Iterates through levels of nested columns, child column will be one level down
// compared to parent column.
auto& col_meta = *_col_meta;
for (std::size_t level = 0; level < _selected_columns.num_levels(); ++level) {
auto& columns_level = _selected_columns.levels[level];
// Association between each ORC column and its cudf::column
col_meta.orc_col_map.emplace_back(_metadata.get_num_cols(), -1);
std::vector<orc_column_meta> nested_col;
// Get a list of column data types
std::vector<data_type> column_types;
for (auto& col : columns_level) {
auto col_type = to_cudf_type(_metadata.get_col_type(col.id).kind,
_use_np_dtypes,
_timestamp_type.id(),
to_cudf_decimal_type(_decimal128_columns, _metadata, col.id));
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
if (col_type == type_id::DECIMAL32 or col_type == type_id::DECIMAL64 or
col_type == type_id::DECIMAL128) {
// sign of the scale is changed since cuDF follows c++ libraries like CNL
// which uses negative scaling, but liborc and other libraries
// follow positive scaling.
auto const scale =
-static_cast<size_type>(_metadata.get_col_type(col.id).scale.value_or(0));
column_types.emplace_back(col_type, scale);
} else {
column_types.emplace_back(col_type);
}
// Map each ORC column to its column
col_meta.orc_col_map[level][col.id] = column_types.size() - 1;
if (col_type == type_id::LIST or col_type == type_id::STRUCT) {
nested_col.emplace_back(col);
}
}
// Get the total number of stripes across all input files.
std::size_t total_num_stripes =
std::accumulate(selected_stripes.begin(),
selected_stripes.end(),
0,
[](std::size_t sum, auto& stripe_source_mapping) {
return sum + stripe_source_mapping.stripe_info.size();
});
auto const num_columns = columns_level.size();
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks(
total_num_stripes, num_columns, _stream);
memset(chunks.base_host_ptr(), 0, chunks.size_bytes());
const bool use_index =
_use_index &&
// Do stripes have row group index
_metadata.is_row_grp_idx_present() &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(rows_to_read > _metadata.get_row_index_stride() && !(_metadata.get_row_index_stride() & 7) &&
_metadata.get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(rows_to_skip == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
null_count_prefix_sums.emplace_back();
null_count_prefix_sums.back().reserve(_selected_columns.levels[level].size());
std::generate_n(std::back_inserter(null_count_prefix_sums.back()),
_selected_columns.levels[level].size(),
[&]() {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
total_num_stripes, _stream, rmm::mr::get_current_device_resource());
});
// Tracker for eventually deallocating compressed and uncompressed data
auto& stripe_data = lvl_stripe_data[level];
std::size_t stripe_start_row = 0;
std::size_t num_dict_entries = 0;
std::size_t num_rowgroups = 0;
int stripe_idx = 0;
std::vector<std::pair<std::future<std::size_t>, std::size_t>> read_tasks;
for (auto const& stripe_source_mapping : selected_stripes) {
// Iterate through the source files selected stripes
for (auto const& stripe : stripe_source_mapping.stripe_info) {
auto const stripe_info = stripe.first;
auto const stripe_footer = stripe.second;
auto stream_count = stream_info.size();
auto const total_data_size = gather_stream_info(stripe_idx,
stripe_info,
stripe_footer,
col_meta.orc_col_map[level],
_metadata.get_types(),
use_index,
level == 0,
&num_dict_entries,
stream_info,
chunks);
auto const is_stripe_data_empty = total_data_size == 0;
CUDF_EXPECTS(not is_stripe_data_empty or stripe_info->indexLength == 0,
"Invalid index rowgroup stream data");
// Buffer needs to be padded.
// Required by `copy_uncompressed_kernel`.
stripe_data.emplace_back(
cudf::util::round_up_safe(total_data_size, BUFFER_PADDING_MULTIPLE), _stream);
auto dst_base = static_cast<uint8_t*>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (not is_stripe_data_empty and stream_count < stream_info.size()) {
auto const d_dst = dst_base + stream_info[stream_count].dst_pos;
auto const offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
if (_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->is_device_read_preferred(len)) {
read_tasks.push_back(
std::pair(_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->device_read_async(offset, len, d_dst, _stream),
len));
} else {
auto const buffer =
_metadata.per_file_metadata[stripe_source_mapping.source_idx].source->host_read(
offset, len);
CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read.");
CUDF_CUDA_TRY(
hipMemcpyAsync(d_dst, buffer->data(), len, hipMemcpyDefault, _stream.value()));
_stream.synchronize();
}
}
auto const num_rows_per_stripe = stripe_info->numberOfRows;
auto const rowgroup_id = num_rowgroups;
auto stripe_num_rowgroups = 0;
if (use_index) {
stripe_num_rowgroups = (num_rows_per_stripe + _metadata.get_row_index_stride() - 1) /
_metadata.get_row_index_stride();
}
// Update chunks to reference streams pointers
for (std::size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto& chunk = chunks[stripe_idx][col_idx];
// start row, number of rows in each stripe and total number of rows
// may change in lower levels of nesting
chunk.start_row = (level == 0)
? stripe_start_row
: col_meta.child_start_row[stripe_idx * num_columns + col_idx];
chunk.num_rows =
(level == 0) ? stripe_info->numberOfRows
: col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx];
chunk.column_num_rows = (level == 0) ? rows_to_read : col_meta.num_child_rows[col_idx];
chunk.parent_validity_info =
(level == 0) ? column_validity_info{} : col_meta.parent_column_data[col_idx];
chunk.parent_null_count_prefix_sums =
(level == 0)
? nullptr
: null_count_prefix_sums[level - 1][col_meta.parent_column_index[col_idx]].data();
chunk.encoding_kind = stripe_footer->columns[columns_level[col_idx].id].kind;
chunk.type_kind = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.kind;
// num_child_rows for a struct column will be same, for other nested types it will be
// calculated.
chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows;
chunk.dtype_id = column_types[col_idx].id();
chunk.decimal_scale = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.scale.value_or(0);
chunk.rowgroup_id = rowgroup_id;
chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING)
? sizeof(string_index_pair)
: ((column_types[col_idx].id() == type_id::LIST) or
(column_types[col_idx].id() == type_id::STRUCT))
? sizeof(size_type)
: cudf::size_of(column_types[col_idx]);
chunk.num_rowgroups = stripe_num_rowgroups;
if (chunk.type_kind == orc::TIMESTAMP) { chunk.timestamp_type_id = _timestamp_type.id(); }
if (not is_stripe_data_empty) {
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += num_rows_per_stripe;
num_rowgroups += stripe_num_rowgroups;
stripe_idx++;
}
}
for (auto& task : read_tasks) {
CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read.");
}
if (stripe_data.empty()) { continue; }
// Process dataset chunk pages into output columns
auto row_groups =
cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, _stream);
if (level > 0 and row_groups.size().first) {
cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(),
num_rowgroups * num_columns);
auto& rw_grp_meta = col_meta.rwgrp_meta;
// Update start row and num rows per row group
std::transform(rw_grp_meta.begin(),
rw_grp_meta.end(),
row_groups_span.begin(),
rw_grp_meta.begin(),
[&](auto meta, auto& row_grp) {
row_grp.num_rows = meta.num_rows;
row_grp.start_row = meta.start_row;
return meta;
});
}
// Setup row group descriptors if using indexes
if (_metadata.per_file_metadata[0].ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(*_metadata.per_file_metadata[0].decompressor,
stripe_data,
stream_info,
chunks,
row_groups,
total_num_stripes,
_metadata.get_row_index_stride(),
level == 0,
_stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (row_groups.size().first) {
chunks.host_to_device_async(_stream);
row_groups.host_to_device_async(_stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
nullptr,
chunks.base_device_ptr(),
num_columns,
total_num_stripes,
num_rowgroups,
_metadata.get_row_index_stride(),
level == 0,
_stream);
}
}
for (std::size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (std::size_t j = 0; j < total_num_stripes; ++j) {
if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
auto is_list_type = (column_types[i].id() == type_id::LIST);
auto n_rows = (level == 0) ? rows_to_read : col_meta.num_child_rows[i];
// For list column, offset column will be always size + 1
if (is_list_type) n_rows++;
out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, _stream, _mr);
}
decode_stream_data(num_dict_entries,
rows_to_skip,
_metadata.get_row_index_stride(),
level,
tz_table->view(),
chunks,
row_groups,
out_buffers[level],
_stream,
_mr);
if (nested_col.size()) {
// Extract information to process nested child columns
scan_null_counts(chunks, null_count_prefix_sums[level], _stream);
row_groups.device_to_host_sync(_stream);
aggregate_child_meta(
level, _selected_columns, chunks, row_groups, nested_col, out_buffers[level], col_meta);
// ORC stores number of elements at each row, so we need to generate offsets from that
std::vector<list_buffer_data> buff_data;
std::for_each(
out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) {
if (out_buffer.type.id() == type_id::LIST) {
auto data = static_cast<size_type*>(out_buffer.data());
buff_data.emplace_back(list_buffer_data{data, out_buffer.size});
}
});
if (not buff_data.empty()) { generate_offsets_for_list(buff_data, _stream); }
}
}
// Create columns from buffer with respective schema information.
std::transform(
_selected_columns.levels[0].begin(),
_selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const& orc_col_meta) {
out_metadata.schema_info.emplace_back("");
auto col_buffer = assemble_buffer(
orc_col_meta.id, 0, col_meta, _metadata, _selected_columns, out_buffers, _stream, _mr);
return make_column(col_buffer, &out_metadata.schema_info.back(), std::nullopt, _stream);
});
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
orc_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl{std::make_unique<impl>(std::move(sources), options, stream, mr)}
{
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const& options)
{
return _impl->read(options.get_skip_rows(), options.get_num_rows(), options.get_stripes());
}
} // namespace cudf::io::detail::orc
| eb4b15a3820a2326137a9448a2c858498a6bb18c.cu |
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "orc.hpp"
#include "orc_gpu.hpp"
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.hpp>
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>
#include <cudf/detail/timezone.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/pair.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <algorithm>
#include <iterator>
namespace cudf::io::detail::orc {
using namespace cudf::io::orc;
namespace {
/**
* @brief Keeps track of orc mapping and child column details.
*/
struct reader_column_meta {
// Mapping between column id in orc to processing order.
std::vector<std::vector<size_type>> orc_col_map;
// Number of rows in child columns.
std::vector<uint32_t> num_child_rows;
// Consists of parent column valid_map and null count.
std::vector<column_validity_info> parent_column_data;
std::vector<size_type> parent_column_index;
// Start row of child columns [stripe][column].
std::vector<uint32_t> child_start_row;
// Number of rows of child columns [stripe][column].
std::vector<uint32_t> num_child_rows_per_stripe;
struct row_group_meta {
uint32_t num_rows; // number of rows in a column in a row group
uint32_t start_row; // start row in a column in a row group
};
// Row group metadata [rowgroup][column].
std::vector<row_group_meta> rwgrp_meta;
};
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
explicit orc_stream_info(uint64_t offset_,
std::size_t dst_pos_,
uint32_t length_,
uint32_t stripe_idx_)
: offset(offset_), dst_pos(dst_pos_), length(length_), stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
std::size_t dst_pos; // offset in memory relative to start of compressed stripe data
std::size_t length; // length in file
uint32_t stripe_idx; // stripe index
};
/**
* @brief Function that populates column descriptors stream/chunk
*/
std::size_t gather_stream_info(std::size_t stripe_index,
orc::StripeInformation const* stripeinfo,
orc::StripeFooter const* stripefooter,
host_span<int const> orc2gdf,
host_span<orc::SchemaType const> types,
bool use_index,
bool apply_struct_map,
std::size_t* num_dictionary_entries,
std::vector<orc_stream_info>& stream_info,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks)
{
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
auto const get_stream_index_type = [](orc::StreamKind kind) {
switch (kind) {
case orc::DATA: return gpu::CI_DATA;
case orc::LENGTH:
case orc::SECONDARY: return gpu::CI_DATA2;
case orc::DICTIONARY_DATA: return gpu::CI_DICTIONARY;
case orc::PRESENT: return gpu::CI_PRESENT;
case orc::ROW_INDEX: return gpu::CI_INDEX;
default:
// Skip this stream as it's not strictly required
return gpu::CI_NUM_STREAMS;
}
};
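// Walk every stream in the stripe footer: accumulate source/destination byte offsets and, for
// streams that map to a selected column, record the stream's id and length in the matching
// column chunk descriptor.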
for (auto const& stream : stripefooter->streams) {
if (!stream.column_id || *stream.column_id >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto const column_id = *stream.column_id;
auto col = orc2gdf[column_id];
if (col == -1 and apply_struct_map) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
auto const schema_type = types[column_id];
if (not schema_type.subtypes.empty()) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (auto const& idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto& chunk = chunks[stripe_index][col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
auto& chunk = chunks[stripe_index][col];
auto const index_type = get_stream_index_type(stream.kind);
if (index_type < gpu::CI_NUM_STREAMS) {
chunk.strm_id[index_type] = stream_info.size();
chunk.strm_len[index_type] = stream.length;
// NOTE: skip_count field is temporarily used to track the presence of index streams
chunk.skip_count |= 1 << index_type;
if (index_type == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[column_id].dictionarySize;
*num_dictionary_entries += stripefooter->columns[column_id].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
/**
* @brief Decompresses the stripe data, at stream granularity.
*
* @param decompressor Block decompressor
* @param stripe_data List of source stripe column data
* @param stream_info List of stream to column mappings
* @param chunks Vector of list of column chunk descriptors
* @param row_groups Vector of list of row index descriptors
* @param num_stripes Number of stripes making up column chunks
* @param row_index_stride Distance between each row index
* @param use_base_stride Whether to use base stride obtained from meta or use the computed value
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Device buffer to decompressed page data
*/
rmm::device_buffer decompress_stripe_data(
OrcDecompressor const& decompressor,
host_span<rmm::device_buffer const> stripe_data,
host_span<orc_stream_info> stream_info,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
std::size_t num_stripes,
std::size_t row_index_stride,
bool use_base_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
cudf::detail::hostdevice_vector<gpu::CompressedStreamInfo> compinfo(
0, stream_info.size(), stream);
for (auto const& info : stream_info) {
compinfo.push_back(gpu::CompressedStreamInfo(
static_cast<uint8_t const*>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
compinfo.host_to_device_async(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor.GetBlockSize(),
decompressor.GetLog2MaxCompressionRatio(),
stream);
compinfo.device_to_host_sync(stream);
// Count the exact number of compressed blocks
std::size_t num_compressed_blocks = 0;
std::size_t num_uncompressed_blocks = 0;
std::size_t total_decomp_size = 0;
for (std::size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(
not((num_uncompressed_blocks + num_compressed_blocks > 0) and (total_decomp_size == 0)),
"Inconsistent info on compression blocks");
// Buffer needs to be padded.
// Required by `gpuDecodeOrcColumnData`.
rmm::device_buffer decomp_data(
cudf::util::round_up_safe(total_decomp_size, BUFFER_PADDING_MULTIPLE), stream);
if (decomp_data.is_empty()) { return decomp_data; }
rmm::device_uvector<device_span<uint8_t const>> inflate_in(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<device_span<uint8_t>> inflate_out(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<compression_result> inflate_res(num_compressed_blocks, stream);
thrust::fill(rmm::exec_policy(stream),
inflate_res.begin(),
inflate_res.end(),
compression_result{0, compression_status::FAILURE});
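// Pre-filling the results with FAILURE means any block the decompressor never writes a
// status for is caught by the any_block_failure check further below.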
// Parse again to populate the decompression input/output buffers
std::size_t decomp_offset = 0;
uint32_t max_uncomp_block_size = 0;
uint32_t start_pos = 0;
auto start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (std::size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t*>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].dec_in_ctl = inflate_in.data() + start_pos;
compinfo[i].dec_out_ctl = inflate_out.data() + start_pos;
compinfo[i].dec_res = {inflate_res.data() + start_pos, compinfo[i].num_compressed_blocks};
compinfo[i].copy_in_ctl = inflate_in.data() + start_pos_uncomp;
compinfo[i].copy_out_ctl = inflate_out.data() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
max_uncomp_block_size =
std::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size);
}
compinfo.host_to_device_async(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor.GetBlockSize(),
decompressor.GetLog2MaxCompressionRatio(),
stream);
// Flag used to check whether decompression succeeded.
// It doesn't need to be atomic as there is no race condition: we only write `true` if needed.
cudf::detail::hostdevice_vector<bool> any_block_failure(1, stream);
any_block_failure[0] = false;
any_block_failure.host_to_device_async(stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
device_span<device_span<uint8_t const>> inflate_in_view{inflate_in.data(),
num_compressed_blocks};
device_span<device_span<uint8_t>> inflate_out_view{inflate_out.data(), num_compressed_blocks};
switch (decompressor.compression()) {
case compression_type::ZLIB:
if (nvcomp::is_decompression_disabled(nvcomp::compression_type::DEFLATE)) {
gpuinflate(
inflate_in_view, inflate_out_view, inflate_res, gzip_header_included::NO, stream);
} else {
nvcomp::batched_decompress(nvcomp::compression_type::DEFLATE,
inflate_in_view,
inflate_out_view,
inflate_res,
max_uncomp_block_size,
total_decomp_size,
stream);
}
break;
case compression_type::SNAPPY:
if (nvcomp::is_decompression_disabled(nvcomp::compression_type::SNAPPY)) {
gpu_unsnap(inflate_in_view, inflate_out_view, inflate_res, stream);
} else {
nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY,
inflate_in_view,
inflate_out_view,
inflate_res,
max_uncomp_block_size,
total_decomp_size,
stream);
}
break;
case compression_type::ZSTD:
if (auto const reason = nvcomp::is_decompression_disabled(nvcomp::compression_type::ZSTD);
reason) {
CUDF_FAIL("Decompression error: " + reason.value());
}
nvcomp::batched_decompress(nvcomp::compression_type::ZSTD,
inflate_in_view,
inflate_out_view,
inflate_res,
max_uncomp_block_size,
total_decomp_size,
stream);
break;
default: CUDF_FAIL("Unexpected decompression dispatch"); break;
}
// Check if any block has failed to decompress.
// Not using `thrust::any` or `thrust::count_if` to defer stream sync.
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(std::size_t{0}),
thrust::make_counting_iterator(inflate_res.size()),
[results = inflate_res.begin(),
any_block_failure = any_block_failure.device_ptr()] __device__(auto const idx) {
if (results[idx].status != compression_status::SUCCESS) { *any_block_failure = true; }
});
}
if (num_uncompressed_blocks > 0) {
device_span<device_span<uint8_t const>> copy_in_view{inflate_in.data() + num_compressed_blocks,
num_uncompressed_blocks};
device_span<device_span<uint8_t>> copy_out_view{inflate_out.data() + num_compressed_blocks,
num_uncompressed_blocks};
gpu_copy_uncompressed_blocks(copy_in_view, copy_out_view, stream);
}
// Copy without stream sync, thus need to wait for stream sync below to access.
any_block_failure.device_to_host_async(stream);
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
compinfo.device_to_host_sync(stream); // This also sync stream for `any_block_failure`.
// We can check on host after stream synchronize
CUDF_EXPECTS(not any_block_failure[0], "Error during decompression");
auto const num_columns = chunks.size().second;
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
for (std::size_t i = 0; i < num_stripes; ++i) {
for (std::size_t j = 0; j < num_columns; ++j) {
auto& chunk = chunks[i][j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (row_groups.size().first) {
chunks.host_to_device_async(stream);
row_groups.host_to_device_async(stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
compinfo.device_ptr(),
chunks.base_device_ptr(),
num_columns,
num_stripes,
row_groups.size().first,
row_index_stride,
use_base_stride,
stream);
}
return decomp_data;
}
/**
* @brief Updates null mask of columns whose parent is a struct column.
*
* If a struct column has null elements, those rows are skipped when writing the child column in
* ORC, so we need to insert the missing null elements into the child column. There is another
* behavior, seen with pyspark: if the child column doesn't have any null elements, it will not
* have a PRESENT stream, and in that case the parent null mask needs to be copied to the child
* column.
*
* @param chunks Vector of list of column chunk descriptors
* @param out_buffers Output columns' device buffers
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource to use for device memory allocation
*/
void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
host_span<column_buffer> out_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_stripes = chunks.size().first;
auto const num_columns = chunks.size().second;
bool is_mask_updated = false;
for (std::size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) {
if (not is_mask_updated) {
chunks.device_to_host_sync(stream);
is_mask_updated = true;
}
auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base;
auto child_valid_map_base = out_buffers[col_idx].null_mask();
auto child_mask_len =
chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count;
auto parent_mask_len = chunks[0][col_idx].column_num_rows;
if (child_valid_map_base != nullptr) {
rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream);
// Copy indexes at which the parent has a valid value.
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + parent_mask_len,
dst_idx.begin(),
[parent_valid_map_base] __device__(auto idx) {
return bit_is_set(parent_valid_map_base, idx);
});
auto merged_null_mask = cudf::detail::create_null_mask(
parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr);
auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data());
uint32_t* dst_idx_ptr = dst_idx.data();
// Copy child valid bits from the child column to the valid indexes; this merges the child
// and parent null masks
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + dst_idx.size(),
[child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) {
if (bit_is_set(child_valid_map_base, idx)) {
cudf::set_bit(merged_mask, dst_idx_ptr[idx]);
};
});
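// Example with hypothetical bits: parent validity [1,0,1,1] and compacted child validity
// [1,0,1] merge into [1,0,0,1] -- child bits are scattered back onto the rows where the
// parent is valid, and rows the parent skipped stay null.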
out_buffers[col_idx].set_null_mask(std::move(merged_null_mask));
} else {
// Since child column doesn't have a mask, copy parent null mask
auto mask_size = bitmask_allocation_size_bytes(parent_mask_len);
out_buffers[col_idx].set_null_mask(
rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr));
}
}
}
if (is_mask_updated) {
// Update chunks with pointers to column data which might have been changed.
for (std::size_t stripe_idx = 0; stripe_idx < num_stripes; ++stripe_idx) {
for (std::size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.valid_map_base = out_buffers[col_idx].null_mask();
}
}
chunks.host_to_device_sync(stream);
}
}
/**
* @brief Converts the stripe column data and outputs to columns.
*
* @param num_dicts Number of dictionary entries required
* @param skip_rows Number of rows to offset from start
* @param row_index_stride Distance between each row index
* @param level Current nesting level being processed
* @param tz_table Local time to UTC conversion table
* @param chunks Vector of list of column chunk descriptors
* @param row_groups Vector of list of row index descriptors
* @param out_buffers Output columns' device buffers
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource to use for device memory allocation
*/
void decode_stream_data(std::size_t num_dicts,
std::size_t skip_rows,
std::size_t row_index_stride,
std::size_t level,
table_view const& tz_table,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
cudf::detail::device_2dspan<gpu::RowGroup> row_groups,
std::vector<column_buffer>& out_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_stripes = chunks.size().first;
auto const num_columns = chunks.size().second;
thrust::counting_iterator<int> col_idx_it(0);
thrust::counting_iterator<int> stripe_idx_it(0);
// Update chunks with pointers to column data
std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.column_data_base = out_buffers[col_idx].data();
chunk.valid_map_base = out_buffers[col_idx].null_mask();
});
});
// Allocate global dictionary for deserializing
rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);
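// num_dicts is the total dictionary entry count accumulated by gather_stream_info() across
// stripes, so one flat allocation serves all chunks via their dictionary_start offsets.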
chunks.host_to_device_sync(stream);
gpu::DecodeNullsAndStringDictionaries(
chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream);
if (level > 0) {
// Update nullmasks for children if parent was a struct and had null mask
update_null_mask(chunks, out_buffers, stream, mr);
}
auto const tz_table_dptr = table_device_view::create(tz_table, stream);
rmm::device_scalar<size_type> error_count(0, stream);
// Update the null map for child columns
gpu::DecodeOrcColumnData(chunks.base_device_ptr(),
global_dict.data(),
row_groups,
num_columns,
num_stripes,
skip_rows,
*tz_table_dptr,
row_groups.size().first,
row_index_stride,
level,
error_count.data(),
stream);
chunks.device_to_host_async(stream);
// `value` synchronizes
auto const num_errors = error_count.value(stream);
CUDF_EXPECTS(num_errors == 0, "ORC data decode failed");
std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) {
out_buffers[col_idx].null_count() =
std::accumulate(stripe_idx_it + 0,
stripe_idx_it + num_stripes,
0,
[&](auto null_count, auto const stripe_idx) {
return null_count + chunks[stripe_idx][col_idx].null_count;
});
});
}
/**
* @brief Compute the per-stripe prefix sum of null count, for each struct column in the current
* layer.
*/
void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks,
cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums,
rmm::cuda_stream_view stream)
{
auto const num_stripes = chunks.size().first;
if (num_stripes == 0) return;
auto const num_columns = chunks.size().second;
std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update;
for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) {
// Null counts sums are only needed for children of struct columns
if (chunks[0][col_idx].type_kind == STRUCT) {
prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]);
}
}
auto const d_prefix_sums_to_update = cudf::detail::make_device_uvector_async(
prefix_sums_to_update, stream, rmm::mr::get_current_device_resource());
thrust::for_each(rmm::exec_policy(stream),
d_prefix_sums_to_update.begin(),
d_prefix_sums_to_update.end(),
[chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__(
auto const& idx_psums) {
auto const col_idx = idx_psums.first;
auto const psums = idx_psums.second;
thrust::transform(
thrust::seq,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + psums.size(),
psums.begin(),
[&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; });
thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin());
});
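// Example with hypothetical counts: per-stripe null counts [2, 0, 3] become inclusive
// prefix sums [2, 2, 5], which the next nesting level reads via
// parent_null_count_prefix_sums.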
// `prefix_sums_to_update` goes out of scope, copy has to be done before we return
stream.synchronize();
}
/**
* @brief Aggregate child metadata from parent column chunks.
*/
void aggregate_child_meta(std::size_t level,
cudf::io::orc::detail::column_hierarchy const& selected_columns,
cudf::detail::host_2dspan<gpu::ColumnDesc> chunks,
cudf::detail::host_2dspan<gpu::RowGroup> row_groups,
host_span<orc_column_meta const> list_col,
host_span<column_buffer> out_buffers,
reader_column_meta& col_meta)
{
auto const num_of_stripes = chunks.size().first;
auto const num_of_rowgroups = row_groups.size().first;
auto const num_child_cols = selected_columns.levels[level + 1].size();
auto const number_of_child_chunks = num_child_cols * num_of_stripes;
auto& num_child_rows = col_meta.num_child_rows;
auto& parent_column_data = col_meta.parent_column_data;
// Reset the meta to store child column details.
num_child_rows.resize(selected_columns.levels[level + 1].size());
std::fill(num_child_rows.begin(), num_child_rows.end(), 0);
parent_column_data.resize(number_of_child_chunks);
col_meta.parent_column_index.resize(number_of_child_chunks);
col_meta.child_start_row.resize(number_of_child_chunks);
col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks);
col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols);
auto child_start_row = cudf::detail::host_2dspan<uint32_t>(
col_meta.child_start_row.data(), num_of_stripes, num_child_cols);
auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>(
col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols);
auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>(
col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols);
int index = 0; // number of child columns processed
// For each parent column, update its child column meta for each stripe.
std::for_each(list_col.begin(), list_col.end(), [&](auto const p_col) {
auto const parent_col_idx = col_meta.orc_col_map[level][p_col.id];
auto start_row = 0;
auto processed_row_groups = 0;
for (std::size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) {
// Aggregate num_rows and start_row from processed parent columns per row groups
if (num_of_rowgroups) {
auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups;
auto processed_child_rows = 0;
for (std::size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups;
rowgroup_id++, processed_row_groups++) {
auto const child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
auto const child_col_idx = index + id;
rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows;
rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows;
}
processed_child_rows += child_rows;
}
}
// Aggregate start row, number of rows per chunk and total number of rows in a column
auto const child_rows = chunks[stripe_id][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
auto const child_col_idx = index + id;
num_child_rows[child_col_idx] += child_rows;
num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows;
// start row could be different for each column when there is nesting at each stripe level
child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row;
}
start_row += child_rows;
}
// The parent column's null mask and null count are required for the child column
// to adjust its null mask.
auto type = out_buffers[parent_col_idx].type.id();
auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count());
auto parent_valid_map = out_buffers[parent_col_idx].null_mask();
auto num_rows = out_buffers[parent_col_idx].size;
for (size_type id = 0; id < p_col.num_children; id++) {
auto const child_col_idx = index + id;
col_meta.parent_column_index[child_col_idx] = parent_col_idx;
if (type == type_id::STRUCT) {
parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count};
// The number of rows in the child remains the same as the parent for struct columns
num_child_rows[child_col_idx] = num_rows;
} else {
parent_column_data[child_col_idx] = {nullptr, 0};
}
}
index += p_col.num_children;
});
}
/**
* @brief struct to store buffer data and size of list buffer
*/
struct list_buffer_data {
size_type* data;
size_type size;
};
// Generates offsets for list buffers from the number of elements in each row.
void generate_offsets_for_list(host_span<list_buffer_data> buff_data, rmm::cuda_stream_view stream)
{
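// Each buffer holds per-row element counts at this point; the in-place exclusive scan
// turns them into offsets, e.g. counts [2, 0, 3, x] -> offsets [0, 2, 2, 5] (the extra
// slot exists because list columns are allocated with size + 1 rows by the caller).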
for (auto& list_data : buff_data) {
thrust::exclusive_scan(rmm::exec_policy_nosync(stream),
list_data.data,
list_data.data + list_data.size,
list_data.data);
}
}
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_cudf_type(orc::TypeKind kind,
bool use_np_dtypes,
type_id timestamp_type_id,
type_id decimal_type_id)
{
switch (kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL: return decimal_type_id;
// Need to update once cuDF plans to support map type
case orc::MAP:
case orc::LIST: return type_id::LIST;
case orc::STRUCT: return type_id::STRUCT;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Determines cuDF type of an ORC Decimal column.
*/
type_id to_cudf_decimal_type(host_span<std::string const> decimal128_columns,
cudf::io::orc::detail::aggregate_orc_metadata const& metadata,
int column_index)
{
if (metadata.get_col_type(column_index).kind != DECIMAL) { return type_id::EMPTY; }
if (std::find(decimal128_columns.begin(),
decimal128_columns.end(),
metadata.column_path(0, column_index)) != decimal128_columns.end()) {
return type_id::DECIMAL128;
}
auto const precision = metadata.get_col_type(column_index)
.precision.value_or(cuda::std::numeric_limits<int64_t>::digits10);
if (precision <= cuda::std::numeric_limits<int32_t>::digits10) { return type_id::DECIMAL32; }
if (precision <= cuda::std::numeric_limits<int64_t>::digits10) { return type_id::DECIMAL64; }
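// digits10 is 9 for int32_t and 18 for int64_t, so e.g. precision 9 still fits DECIMAL32
// and precision 18 fits DECIMAL64; an unspecified precision defaults to 18 (DECIMAL64).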
return type_id::DECIMAL128;
}
std::string get_map_child_col_name(std::size_t const idx) { return (idx == 0) ? "key" : "value"; }
/**
* @brief Create empty columns and respective schema information from the buffer.
*/
std::unique_ptr<column> create_empty_column(
size_type orc_col_id,
cudf::io::orc::detail::aggregate_orc_metadata const& metadata,
host_span<std::string const> decimal128_columns,
bool use_np_dtypes,
data_type timestamp_type,
column_name_info& schema_info,
rmm::cuda_stream_view stream)
{
schema_info.name = metadata.column_name(0, orc_col_id);
auto const kind = metadata.get_col_type(orc_col_id).kind;
auto const type = to_cudf_type(kind,
use_np_dtypes,
timestamp_type.id(),
to_cudf_decimal_type(decimal128_columns, metadata, orc_col_id));
switch (kind) {
case orc::LIST: {
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("");
return make_lists_column(0,
make_empty_column(type_id::INT32),
create_empty_column(metadata.get_col_type(orc_col_id).subtypes[0],
metadata,
decimal128_columns,
use_np_dtypes,
timestamp_type,
schema_info.children.back(),
stream),
0,
rmm::device_buffer{0, stream},
stream);
}
case orc::MAP: {
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("struct");
auto const child_column_ids = metadata.get_col_type(orc_col_id).subtypes;
auto& children_schema = schema_info.children.back().children;
std::vector<std::unique_ptr<column>> child_columns;
for (std::size_t idx = 0; idx < metadata.get_col_type(orc_col_id).subtypes.size(); idx++) {
children_schema.emplace_back("");
child_columns.push_back(create_empty_column(child_column_ids[idx],
metadata,
decimal128_columns,
use_np_dtypes,
timestamp_type,
schema_info.children.back().children.back(),
stream));
children_schema[idx].name = get_map_child_col_name(idx);
}
return make_lists_column(
0,
make_empty_column(type_id::INT32),
make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream),
0,
rmm::device_buffer{0, stream},
stream);
}
case orc::STRUCT: {
std::vector<std::unique_ptr<column>> child_columns;
for (auto const col : metadata.get_col_type(orc_col_id).subtypes) {
schema_info.children.emplace_back("");
child_columns.push_back(create_empty_column(col,
metadata,
decimal128_columns,
use_np_dtypes,
timestamp_type,
schema_info.children.back(),
stream));
}
return make_structs_column(
0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
}
case orc::DECIMAL: {
int32_t scale = 0;
if (type == type_id::DECIMAL32 or type == type_id::DECIMAL64 or type == type_id::DECIMAL128) {
scale = -static_cast<int32_t>(metadata.get_types()[orc_col_id].scale.value_or(0));
}
return make_empty_column(data_type(type, scale));
}
default: return make_empty_column(type);
}
}
/**
* @brief Assemble the buffer with child columns.
*/
column_buffer assemble_buffer(size_type orc_col_id,
std::size_t level,
reader_column_meta const& col_meta,
cudf::io::orc::detail::aggregate_orc_metadata const& metadata,
cudf::io::orc::detail::column_hierarchy const& selected_columns,
std::vector<std::vector<column_buffer>>& col_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const col_id = col_meta.orc_col_map[level][orc_col_id];
auto& col_buffer = col_buffers[level][col_id];
col_buffer.name = metadata.column_name(0, orc_col_id);
auto kind = metadata.get_col_type(orc_col_id).kind;
switch (kind) {
case orc::LIST:
case orc::STRUCT: {
auto const& children_indices = selected_columns.children.at(orc_col_id);
for (auto const child_id : children_indices) {
col_buffer.children.emplace_back(assemble_buffer(
child_id, level + 1, col_meta, metadata, selected_columns, col_buffers, stream, mr));
}
} break;
case orc::MAP: {
std::vector<column_buffer> child_col_buffers;
// Get child buffers
auto const& children_indices = selected_columns.children.at(orc_col_id);
for (std::size_t idx = 0; idx < children_indices.size(); idx++) {
auto const col = children_indices[idx];
child_col_buffers.emplace_back(assemble_buffer(
col, level + 1, col_meta, metadata, selected_columns, col_buffers, stream, mr));
child_col_buffers.back().name = get_map_child_col_name(idx);
}
// Create a struct buffer
auto num_rows = child_col_buffers[0].size;
auto struct_buffer =
column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, mr);
struct_buffer.children = std::move(child_col_buffers);
struct_buffer.name = "struct";
col_buffer.children.emplace_back(std::move(struct_buffer));
} break;
default: break;
}
return std::move(col_buffer);
}
} // namespace
reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
orc_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _stream(stream),
_mr(mr),
_sources(std::move(sources)),
_metadata{_sources, stream},
_selected_columns{_metadata.select_columns(options.get_columns())},
_timestamp_type{options.get_timestamp_type()},
_use_index{options.is_enabled_use_index()},
_use_np_dtypes{options.is_enabled_use_np_dtypes()},
_decimal128_columns{options.get_decimal128_columns()},
_col_meta{std::make_unique<reader_column_meta>()}
{
}
table_with_metadata reader::impl::read(uint64_t skip_rows,
std::optional<size_type> const& num_rows_opt,
std::vector<std::vector<size_type>> const& stripes)
{
// Selected columns at different levels of nesting are stored in different elements
// of `selected_columns`; thus, size == 1 means no nested columns
CUDF_EXPECTS(skip_rows == 0 or _selected_columns.num_levels() == 1,
"skip_rows is not supported by nested columns");
// There are no columns in the table
if (_selected_columns.num_levels() == 0) { return {std::make_unique<table>(), table_metadata{}}; }
std::vector<std::vector<column_buffer>> out_buffers(_selected_columns.num_levels());
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Copy user data to the output metadata.
std::transform(_metadata.per_file_metadata.cbegin(),
_metadata.per_file_metadata.cend(),
std::back_inserter(out_metadata.per_file_user_data),
[](auto& meta) {
std::unordered_map<std::string, std::string> kv_map;
std::transform(meta.ff.metadata.cbegin(),
meta.ff.metadata.cend(),
std::inserter(kv_map, kv_map.end()),
[](auto const& kv) {
return std::pair{kv.name, kv.value};
});
return kv_map;
});
out_metadata.user_data = {out_metadata.per_file_user_data[0].begin(),
out_metadata.per_file_user_data[0].end()};
// Select only stripes required (aka row groups)
auto const [rows_to_skip, rows_to_read, selected_stripes] =
_metadata.select_stripes(stripes, skip_rows, num_rows_opt, _stream);
// If no rows or stripes to read, return empty columns
if (rows_to_read == 0 || selected_stripes.empty()) {
std::transform(_selected_columns.levels[0].begin(),
_selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const col_meta) {
out_metadata.schema_info.emplace_back("");
return create_empty_column(col_meta.id,
_metadata,
_decimal128_columns,
_use_np_dtypes,
_timestamp_type,
out_metadata.schema_info.back(),
_stream);
});
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Set up table for converting timestamp columns from local to UTC time
auto const tz_table = [&, &selected_stripes = selected_stripes] {
auto const has_timestamp_column = std::any_of(
_selected_columns.levels.cbegin(), _selected_columns.levels.cend(), [&](auto const& col_lvl) {
return std::any_of(col_lvl.cbegin(), col_lvl.cend(), [&](auto const& col_meta) {
return _metadata.get_col_type(col_meta.id).kind == TypeKind::TIMESTAMP;
});
});
return has_timestamp_column
? cudf::detail::make_timezone_transition_table(
{}, selected_stripes[0].stripe_info[0].second->writerTimezone, _stream)
: std::make_unique<cudf::table>();
}();
std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(_selected_columns.num_levels());
std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums;
// Iterates through levels of nested columns, child column will be one level down
// compared to parent column.
auto& col_meta = *_col_meta;
for (std::size_t level = 0; level < _selected_columns.num_levels(); ++level) {
auto& columns_level = _selected_columns.levels[level];
// Association between each ORC column and its cudf::column
col_meta.orc_col_map.emplace_back(_metadata.get_num_cols(), -1);
std::vector<orc_column_meta> nested_col;
// Get a list of column data types
std::vector<data_type> column_types;
for (auto& col : columns_level) {
auto col_type = to_cudf_type(_metadata.get_col_type(col.id).kind,
_use_np_dtypes,
_timestamp_type.id(),
to_cudf_decimal_type(_decimal128_columns, _metadata, col.id));
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
if (col_type == type_id::DECIMAL32 or col_type == type_id::DECIMAL64 or
col_type == type_id::DECIMAL128) {
// sign of the scale is changed since cuDF follows c++ libraries like CNL
// which uses negative scaling, but liborc and other libraries
// follow positive scaling.
auto const scale =
-static_cast<size_type>(_metadata.get_col_type(col.id).scale.value_or(0));
column_types.emplace_back(col_type, scale);
} else {
column_types.emplace_back(col_type);
}
// Map each ORC column to its column
col_meta.orc_col_map[level][col.id] = column_types.size() - 1;
if (col_type == type_id::LIST or col_type == type_id::STRUCT) {
nested_col.emplace_back(col);
}
}
// Get the total number of stripes across all input files.
std::size_t total_num_stripes =
std::accumulate(selected_stripes.begin(),
selected_stripes.end(),
0,
[](std::size_t sum, auto& stripe_source_mapping) {
return sum + stripe_source_mapping.stripe_info.size();
});
auto const num_columns = columns_level.size();
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks(
total_num_stripes, num_columns, _stream);
memset(chunks.base_host_ptr(), 0, chunks.size_bytes());
const bool use_index =
_use_index &&
// Do stripes have row group index
_metadata.is_row_grp_idx_present() &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(rows_to_read > _metadata.get_row_index_stride() && !(_metadata.get_row_index_stride() & 7) &&
_metadata.get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(rows_to_skip == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
null_count_prefix_sums.emplace_back();
null_count_prefix_sums.back().reserve(_selected_columns.levels[level].size());
std::generate_n(std::back_inserter(null_count_prefix_sums.back()),
_selected_columns.levels[level].size(),
[&]() {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
total_num_stripes, _stream, rmm::mr::get_current_device_resource());
});
// Tracker for eventually deallocating compressed and uncompressed data
auto& stripe_data = lvl_stripe_data[level];
std::size_t stripe_start_row = 0;
std::size_t num_dict_entries = 0;
std::size_t num_rowgroups = 0;
int stripe_idx = 0;
std::vector<std::pair<std::future<std::size_t>, std::size_t>> read_tasks;
for (auto const& stripe_source_mapping : selected_stripes) {
// Iterate through the source files selected stripes
for (auto const& stripe : stripe_source_mapping.stripe_info) {
auto const stripe_info = stripe.first;
auto const stripe_footer = stripe.second;
auto stream_count = stream_info.size();
auto const total_data_size = gather_stream_info(stripe_idx,
stripe_info,
stripe_footer,
col_meta.orc_col_map[level],
_metadata.get_types(),
use_index,
level == 0,
&num_dict_entries,
stream_info,
chunks);
auto const is_stripe_data_empty = total_data_size == 0;
CUDF_EXPECTS(not is_stripe_data_empty or stripe_info->indexLength == 0,
"Invalid index rowgroup stream data");
// Buffer needs to be padded.
// Required by `copy_uncompressed_kernel`.
stripe_data.emplace_back(
cudf::util::round_up_safe(total_data_size, BUFFER_PADDING_MULTIPLE), _stream);
auto dst_base = static_cast<uint8_t*>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (not is_stripe_data_empty and stream_count < stream_info.size()) {
auto const d_dst = dst_base + stream_info[stream_count].dst_pos;
auto const offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
if (_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->is_device_read_preferred(len)) {
read_tasks.push_back(
std::pair(_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->device_read_async(offset, len, d_dst, _stream),
len));
} else {
auto const buffer =
_metadata.per_file_metadata[stripe_source_mapping.source_idx].source->host_read(
offset, len);
CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read.");
CUDF_CUDA_TRY(
cudaMemcpyAsync(d_dst, buffer->data(), len, cudaMemcpyDefault, _stream.value()));
_stream.synchronize();
}
}
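// The loop above coalesces adjacent streams into a single read: e.g. (hypothetical)
// streams at (offset 100, len 40) and (offset 140, len 60) become one 100-byte read.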
auto const num_rows_per_stripe = stripe_info->numberOfRows;
auto const rowgroup_id = num_rowgroups;
auto stripe_num_rowgroups = 0;
if (use_index) {
stripe_num_rowgroups = (num_rows_per_stripe + _metadata.get_row_index_stride() - 1) /
_metadata.get_row_index_stride();
}
// Update chunks to reference streams pointers
for (std::size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto& chunk = chunks[stripe_idx][col_idx];
// start row, number of rows in each stripe and total number of rows
// may change in lower levels of nesting
chunk.start_row = (level == 0)
? stripe_start_row
: col_meta.child_start_row[stripe_idx * num_columns + col_idx];
chunk.num_rows =
(level == 0) ? stripe_info->numberOfRows
: col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx];
chunk.column_num_rows = (level == 0) ? rows_to_read : col_meta.num_child_rows[col_idx];
chunk.parent_validity_info =
(level == 0) ? column_validity_info{} : col_meta.parent_column_data[col_idx];
chunk.parent_null_count_prefix_sums =
(level == 0)
? nullptr
: null_count_prefix_sums[level - 1][col_meta.parent_column_index[col_idx]].data();
chunk.encoding_kind = stripe_footer->columns[columns_level[col_idx].id].kind;
chunk.type_kind = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.kind;
// num_child_rows for a struct column will be the same as the parent; for other nested
// types it will be calculated.
chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows;
chunk.dtype_id = column_types[col_idx].id();
chunk.decimal_scale = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.scale.value_or(0);
chunk.rowgroup_id = rowgroup_id;
chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING)
? sizeof(string_index_pair)
: ((column_types[col_idx].id() == type_id::LIST) or
(column_types[col_idx].id() == type_id::STRUCT))
? sizeof(size_type)
: cudf::size_of(column_types[col_idx]);
chunk.num_rowgroups = stripe_num_rowgroups;
if (chunk.type_kind == orc::TIMESTAMP) { chunk.timestamp_type_id = _timestamp_type.id(); }
if (not is_stripe_data_empty) {
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += num_rows_per_stripe;
num_rowgroups += stripe_num_rowgroups;
stripe_idx++;
}
}
for (auto& task : read_tasks) {
CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read.");
}
if (stripe_data.empty()) { continue; }
// Process dataset chunk pages into output columns
auto row_groups =
cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, _stream);
if (level > 0 and row_groups.size().first) {
cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(),
num_rowgroups * num_columns);
auto& rw_grp_meta = col_meta.rwgrp_meta;
// Update start row and num rows per row group
std::transform(rw_grp_meta.begin(),
rw_grp_meta.end(),
row_groups_span.begin(),
rw_grp_meta.begin(),
[&](auto meta, auto& row_grp) {
row_grp.num_rows = meta.num_rows;
row_grp.start_row = meta.start_row;
return meta;
});
}
// Setup row group descriptors if using indexes
if (_metadata.per_file_metadata[0].ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(*_metadata.per_file_metadata[0].decompressor,
stripe_data,
stream_info,
chunks,
row_groups,
total_num_stripes,
_metadata.get_row_index_stride(),
level == 0,
_stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (row_groups.size().first) {
chunks.host_to_device_async(_stream);
row_groups.host_to_device_async(_stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
nullptr,
chunks.base_device_ptr(),
num_columns,
total_num_stripes,
num_rowgroups,
_metadata.get_row_index_stride(),
level == 0,
_stream);
}
}
for (std::size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (std::size_t j = 0; j < total_num_stripes; ++j) {
if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
auto is_list_type = (column_types[i].id() == type_id::LIST);
auto n_rows = (level == 0) ? rows_to_read : col_meta.num_child_rows[i];
// For list column, offset column will be always size + 1
if (is_list_type) n_rows++;
out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, _stream, _mr);
}
decode_stream_data(num_dict_entries,
rows_to_skip,
_metadata.get_row_index_stride(),
level,
tz_table->view(),
chunks,
row_groups,
out_buffers[level],
_stream,
_mr);
if (nested_col.size()) {
// Extract information to process nested child columns
scan_null_counts(chunks, null_count_prefix_sums[level], _stream);
row_groups.device_to_host_sync(_stream);
aggregate_child_meta(
level, _selected_columns, chunks, row_groups, nested_col, out_buffers[level], col_meta);
// ORC stores the number of elements in each row, so we need to generate offsets from that
std::vector<list_buffer_data> buff_data;
std::for_each(
out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) {
if (out_buffer.type.id() == type_id::LIST) {
auto data = static_cast<size_type*>(out_buffer.data());
buff_data.emplace_back(list_buffer_data{data, out_buffer.size});
}
});
if (not buff_data.empty()) { generate_offsets_for_list(buff_data, _stream); }
}
}
// Create columns from buffer with respective schema information.
std::transform(
_selected_columns.levels[0].begin(),
_selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const& orc_col_meta) {
out_metadata.schema_info.emplace_back("");
auto col_buffer = assemble_buffer(
orc_col_meta.id, 0, col_meta, _metadata, _selected_columns, out_buffers, _stream, _mr);
return make_column(col_buffer, &out_metadata.schema_info.back(), std::nullopt, _stream);
});
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
orc_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl{std::make_unique<impl>(std::move(sources), options, stream, mr)}
{
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const& options)
{
return _impl->read(options.get_skip_rows(), options.get_num_rows(), options.get_stripes());
}
} // namespace cudf::io::detail::orc
|
038eed16509838c6d9ca6520b2511220d4e8d240.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDA/problems/rosenbrock/sa/CudaAnnealingRosenbrock.h"
#include "CUDA/utils/CudaRandom.h"
#include "CUDA/problems/salesman/CudaSalesmanUtils.h"
#include "assert.h"
__device__ float cuda_rosenbrock_get_energy(Individual<float> my_current_individual, int genome_size, const void* param) {
// genome size = N!
return cuda_rosenbrock_evaluate(my_current_individual, genome_size);
}
__device__ cuda_get_energy<float> device_rosen_energy = cuda_rosenbrock_get_energy;
__device__ cuda_mutation_func<float> device_anneal_rosenbrock_mutation = cuda_rosenbrock_mutate;
cuda_get_energy<float> CudaAnnealingRosenbrock::get_energy_func(){
return on_host_energy;
}
cuda_mutation_func<float> CudaAnnealingRosenbrock::get_mutation_func(){
return on_host_mutation;
}
void CudaAnnealingRosenbrock::init_data_randomly() {
hipLaunchKernelGGL(( cuda_init_floats_randomly), dim3(gridDim), dim3(blockDim), 0, 0, dev_current_sol, gridDim.x, gridDim.x * blockDim.x, genome_size, dev_random, -5, 5);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(dev_starting_point, dev_current_sol, sizeof(float) * genome_size, hipMemcpyDeviceToDevice));
}
void CudaAnnealingRosenbrock::init() {
CudaSimulatedAnnealing<float>::init();
checkCudaErrors(hipMemcpy(dev_current_temp, &currentTemp, sizeof(float), hipMemcpyHostToDevice));
// copy to function pointers
checkCudaErrors(hipMemcpyFromSymbol(&on_host_energy, device_rosen_energy, sizeof(device_rosen_energy)));
checkCudaErrors(hipMemcpyFromSymbol(&on_host_mutation, device_anneal_rosenbrock_mutation, sizeof(device_anneal_rosenbrock_mutation)));
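// Host code cannot take the address of a __device__ function directly, so the pointers
// are stored in __device__ symbols and copied back to the host here before being passed
// to the annealing kernels.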
device_param = (void*) NULL;
}
| 038eed16509838c6d9ca6520b2511220d4e8d240.cu | #include "CUDA/problems/rosenbrock/sa/CudaAnnealingRosenbrock.h"
#include "CUDA/utils/CudaRandom.h"
#include "CUDA/problems/salesman/CudaSalesmanUtils.h"
#include "assert.h"
__device__ float cuda_rosenbrock_get_energy(Individual<float> my_current_individual, int genome_size, const void* param) {
// genome size = N!
return cuda_rosenbrock_evaluate(my_current_individual, genome_size);
}
__device__ cuda_get_energy<float> device_rosen_energy = cuda_rosenbrock_get_energy;
__device__ cuda_mutation_func<float> device_anneal_rosenbrock_mutation = cuda_rosenbrock_mutate;
cuda_get_energy<float> CudaAnnealingRosenbrock::get_energy_func(){
return on_host_energy;
}
cuda_mutation_func<float> CudaAnnealingRosenbrock::get_mutation_func(){
return on_host_mutation;
}
void CudaAnnealingRosenbrock::init_data_randomly() {
cuda_init_floats_randomly<<<gridDim, blockDim, 0>>>(dev_current_sol, gridDim.x, gridDim.x * blockDim.x, genome_size, dev_random, -5, 5);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(dev_starting_point, dev_current_sol, sizeof(float) * genome_size, cudaMemcpyDeviceToDevice));
}
void CudaAnnealingRosenbrock::init() {
CudaSimulatedAnnealing<float>::init();
checkCudaErrors(cudaMemcpy(dev_current_temp, ¤tTemp, sizeof(float), cudaMemcpyHostToDevice));
// copy to function pointers
checkCudaErrors(cudaMemcpyFromSymbol(&on_host_energy, device_rosen_energy, sizeof(device_rosen_energy)));
checkCudaErrors(cudaMemcpyFromSymbol(&on_host_mutation, device_anneal_rosenbrock_mutation, sizeof(device_anneal_rosenbrock_mutation)));
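// Host code cannot take the address of a __device__ function directly, so the pointers
// are stored in __device__ symbols and copied back with cudaMemcpyFromSymbol before being
// passed to the annealing kernels.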
device_param = (void*) NULL;
}
|
92ecaf664b4dacc8026c23664429597079e08bdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
void run_kernel_cholesky(int dim)
{
int start_id, b;
int threads_per_block;
int number_of_blocks;
int num_iters;
double startime, endtime ;
start_id = 0;
// dim = 5000;
b = 2;
start_id = 0;
init_and_print();
// get_input(dim);
initialize_random(dim);
printf("okay random bhar gaya\n");
setup_cholesky(dim, b);
// Input generation
// 1. taking transpose of mt in mt_transpose
threads_per_block = 1024;
number_of_blocks = upit(dim * dim, threads_per_block);
hipLaunchKernelGGL(( generic_matrix_transpose), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, mt, mt_transpose, dim, dim);
hipDeviceSynchronize();
printf("ab jakar transpose hua\n");
/*
print_matrix_kernel<<<1, 1>>>(mt, dim, dim);
hipDeviceSynchronize();
print_matrix_kernel<<<1, 1>>>(mt_transpose, dim, dim);
hipDeviceSynchronize();
*/
startime = CycleTimer::currentSeconds();
dim3 blockDimTemp(32,32);
dim3 gridDimTemp( upit(dim, blockDimTemp.x), upit(dim, blockDimTemp.y));
//matrixmultiply_noshare(double *a, int rowsA, int colsA, double *b, int rowsB, int colsB, double *c)
hipLaunchKernelGGL(( matrixmultiply_noshare), dim3(gridDimTemp), dim3(blockDimTemp) , 0, 0, mt, dim, dim, mt_transpose, dim, dim, M);
hipDeviceSynchronize();
endtime = CycleTimer::currentSeconds();
printf("Now multiplication got over, total time taken for dim = %d, is %lf\n", dim, endtime - startime);
// Now copying the symmetric matrix from CUDA to host
orig_sym = new double[dim * dim];
cudacall(hipMemcpy(orig_sym, M, sizeof(double) * dim * dim, hipMemcpyDeviceToHost));
printf("Host me aya kyaa??\n");
// WRITING TO FILE
/*
std::ofstream out(filename);
for(int i = 0; i < dim ; i++){
for(int j = 0; j < dim ; j++){
out << orig_sym[i*dim + j] << " ";
// printf("%lf ", orig_sym[i*dim + j]);
}
out << "\n";
// printf("\n");
}
out.close();
*/
startime = CycleTimer::currentSeconds();
num_iters = dim / b;
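// Blocked right-looking Cholesky, per iteration (a sketch of the math; M holds the
// symmetric input and is overwritten with the factor L):
//   L11  = chol(A11)                 (hardcoded_cholesky_2x2)
//   L21  = A21 * L11^{-T}            (forward_substitution_rectangular_a21)
//   A22' = A22 - L21 * L21^T         (matrixmultiply_noshare + offseted_elementwise_subtraction)
// then recurse on the trailing (dim - b - start_id) block A22'.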
for (int i = 0; i < num_iters; i++)
{
hipLaunchKernelGGL(( hardcoded_cholesky_2x2), dim3(1), dim3(1), 0, 0, M, a11, dim, b, start_id);
hipDeviceSynchronize();
if (i == num_iters - 1)
break;
// TODO optimize a21_transpose, by bypassing it perhaps? Can avoid transpose and manipulate indices inside next kernel
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id) * b, threads_per_block);
hipLaunchKernelGGL(( take_a21_transpose), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, M, a21_transpose, dim, b, start_id);
hipDeviceSynchronize();
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id), threads_per_block);
hipLaunchKernelGGL(( forward_substitution_rectangular_a21), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, M, a11, a21_transpose, l21_transpose_from_fs, dim, b, start_id);
hipDeviceSynchronize();
// printf("Printing l21_transpose_from_fs\n");
// print_matrix_kernel<<<1, 1>>>(l21_transpose_from_fs, b, dim - b - start_id);
// hipDeviceSynchronize();
/*
printf("\n\n");
printf(" ---------------------------------------- \n");
print_matrix_kernel<<<1, 1>>>(a11, b, b);
hipDeviceSynchronize();
printf(" ---------------------------------------- \n");
print_matrix_kernel<<<1,1>>>(a21_transpose, b, dim - b - start_id);
hipDeviceSynchronize();
printf(" ---------------------------------------- \n");
singlethread_temp_matmult_kernel<<<1, 1>>>(a11, a21_transpose, l21_transpose_from_fs, b, b, dim - b - start_id);
hipDeviceSynchronize();
print_matrix_kernel<<<1,1>>>(l21_transpose_from_fs, b, dim - b - start_id);
hipDeviceSynchronize();
printf("\n\n");
*/
//printf("\nNow printing entire M matrix\n");
//print_matrix_kernel<<<1, 1>>>(M, dim, dim);
//hipDeviceSynchronize();
// TODO: Can include this transpose in the forward_substitution_rectangular_a22 call!!!!
// Now taking transpose of l21_transpose_from_fs
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id) * b, threads_per_block);
hipLaunchKernelGGL(( generic_matrix_transpose), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, l21_transpose_from_fs, l21, b, dim - b - start_id);
hipDeviceSynchronize();
// printf("\nNow checking the transpose => \n");
// print_matrix_kernel<<<1,1>>>(l21, dim - b - start_id, b);
// hipDeviceSynchronize();
// printf("Checking the l21_transpose_from_fs matrix\n");
// check_l21_kernel<<<1, 1>>>(a11, l21_transpose_from_fs, a21_transpose, b, b, dim - b - start_id);
// hipDeviceSynchronize();
//matrixmultiply_noshare<<<(double *a, int rowsA, int colsA, double *b, int rowsB, int colsB, double *c)
int rowA = (dim - b - start_id) , colA = b, rowB = b , colB = (dim - b - start_id) ;
dim3 blockDim(32,32);
dim3 gridDim( upit(colB, blockDim.x), upit(rowA, blockDim.y));
hipLaunchKernelGGL(( matrixmultiply_noshare), dim3(gridDim), dim3(blockDim) , 0, 0, l21, (dim - b - start_id), b, l21_transpose_from_fs, b, dim - b - start_id, l22_temp);
hipDeviceSynchronize();
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id) * (dim - b - start_id), threads_per_block);
hipLaunchKernelGGL(( offseted_elementwise_subtraction), dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, l22_temp, dim - b - start_id, M, dim, b, start_id);
hipDeviceSynchronize();
start_id += b;
}
// Launch a kernel to zero out the upper triangle
threads_per_block = 512;
number_of_blocks = upit( (dim * dim), threads_per_block);
hipLaunchKernelGGL(( set_upper_zero), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, M, dim);
hipDeviceSynchronize();
endtime = CycleTimer::currentSeconds();
printf("Totat time taken = %lf s\n", endtime - startime);
// Now checking!
double *finalans = new double[dim * dim];
cudacall(hipMemcpy(finalans, M, sizeof(double) * dim * dim, hipMemcpyDeviceToHost));
check_cholesky(finalans, orig_sym, dim);
/*for(int i = 0; i < dim ; i++){
for(int j = 0; j < dim ; j++){
printf("%lf ", finalans[i*dim + j]);
}
printf("\n");
}*/
}
| 92ecaf664b4dacc8026c23664429597079e08bdf.cu | void run_kernel_cholesky(int dim)
{
int start_id, b;
int threads_per_block;
int number_of_blocks;
int num_iters;
double startime, endtime ;
start_id = 0;
// dim = 5000;
b = 2;
start_id = 0;
init_and_print();
// get_input(dim);
initialize_random(dim);
printf("okay random bhar gaya\n");
setup_cholesky(dim, b);
// Input generation
// 1. taking transpose of mt in mt_transpose
threads_per_block = 1024;
number_of_blocks = upit(dim * dim, threads_per_block);
generic_matrix_transpose<<<number_of_blocks, threads_per_block>>>(mt, mt_transpose, dim, dim);
cudaThreadSynchronize();
printf("ab jakar transpose hua\n");
/*
print_matrix_kernel<<<1, 1>>>(mt, dim, dim);
cudaThreadSynchronize();
print_matrix_kernel<<<1, 1>>>(mt_transpose, dim, dim);
cudaThreadSynchronize();
*/
startime = CycleTimer::currentSeconds();
dim3 blockDimTemp(32,32);
dim3 gridDimTemp( upit(dim, blockDimTemp.x), upit(dim, blockDimTemp.y));
//matrixmultiply_noshare(double *a, int rowsA, int colsA, double *b, int rowsB, int colsB, double *c)
matrixmultiply_noshare<<<gridDimTemp, blockDimTemp >>>(mt, dim, dim, mt_transpose, dim, dim, M);
cudaThreadSynchronize();
endtime = CycleTimer::currentSeconds();
printf("Now multiplication got over, total time taken for dim = %d, is %lf\n", dim, endtime - startime);
// Now copying the symmetric matrix from CUDA to host
orig_sym = new double[dim * dim];
cudacall(cudaMemcpy(orig_sym, M, sizeof(double) * dim * dim, cudaMemcpyDeviceToHost));
printf("Host me aya kyaa??\n");
// WRITING TO FILE
/*
std::ofstream out(filename);
for(int i = 0; i < dim ; i++){
for(int j = 0; j < dim ; j++){
out << orig_sym[i*dim + j] << " ";
// printf("%lf ", orig_sym[i*dim + j]);
}
out << "\n";
// printf("\n");
}
out.close();
*/
startime = CycleTimer::currentSeconds();
num_iters = dim / b;
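// Blocked right-looking Cholesky, per iteration (a sketch of the math; M holds the
// symmetric input and is overwritten with the factor L):
//   L11  = chol(A11)                 (hardcoded_cholesky_2x2)
//   L21  = A21 * L11^{-T}            (forward_substitution_rectangular_a21)
//   A22' = A22 - L21 * L21^T         (matrixmultiply_noshare + offseted_elementwise_subtraction)
// then recurse on the trailing (dim - b - start_id) block A22'.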
for (int i = 0; i < num_iters; i++)
{
hardcoded_cholesky_2x2<<<1, 1>>>(M, a11, dim, b, start_id);
cudaThreadSynchronize();
if (i == num_iters - 1)
break;
// TODO optimize a21_transpose, by bypassing it perhaps? Can avoid transpose and manipulate indices inside next kernel
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id) * b, threads_per_block);
take_a21_transpose<<<number_of_blocks, threads_per_block>>>(M, a21_transpose, dim, b, start_id);
cudaThreadSynchronize();
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id), threads_per_block);
forward_substitution_rectangular_a21<<<number_of_blocks, threads_per_block>>>(M, a11, a21_transpose, l21_transpose_from_fs, dim, b, start_id);
cudaThreadSynchronize();
// printf("Printing l21_transpose_from_fs\n");
// print_matrix_kernel<<<1, 1>>>(l21_transpose_from_fs, b, dim - b - start_id);
// cudaThreadSynchronize();
/*
printf("\n\n");
printf(" ---------------------------------------- \n");
print_matrix_kernel<<<1, 1>>>(a11, b, b);
cudaThreadSynchronize();
printf(" ---------------------------------------- \n");
print_matrix_kernel<<<1,1>>>(a21_transpose, b, dim - b - start_id);
cudaThreadSynchronize();
printf(" ---------------------------------------- \n");
singlethread_temp_matmult_kernel<<<1, 1>>>(a11, a21_transpose, l21_transpose_from_fs, b, b, dim - b - start_id);
cudaThreadSynchronize();
print_matrix_kernel<<<1,1>>>(l21_transpose_from_fs, b, dim - b - start_id);
cudaThreadSynchronize();
printf("\n\n");
*/
//printf("\nNow printing entire M matrix\n");
//print_matrix_kernel<<<1, 1>>>(M, dim, dim);
//cudaThreadSynchronize();
// TODO: Can include this transpose in the forward_substitution_rectangular_a22 call!!!!
// Now taking transpose of l21_transpose_from_fs
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id) * b, threads_per_block);
generic_matrix_transpose<<<number_of_blocks, threads_per_block>>>(l21_transpose_from_fs, l21, b, dim - b - start_id);
cudaThreadSynchronize();
// printf("\nNow checking the transpose => \n");
// print_matrix_kernel<<<1,1>>>(l21, dim - b - start_id, b);
// cudaThreadSynchronize();
// printf("Checking the l21_transpose_from_fs matrix\n");
// check_l21_kernel<<<1, 1>>>(a11, l21_transpose_from_fs, a21_transpose, b, b, dim - b - start_id);
// cudaThreadSynchronize();
//matrixmultiply_noshare<<<(double *a, int rowsA, int colsA, double *b, int rowsB, int colsB, double *c)
int rowA = (dim - b - start_id) , colA = b, rowB = b , colB = (dim - b - start_id) ;
dim3 blockDim(32,32);
dim3 gridDim( upit(colB, blockDim.x), upit(rowA, blockDim.y));
matrixmultiply_noshare<<<gridDim, blockDim >>>(l21, (dim - b - start_id), b, l21_transpose_from_fs, b, dim - b - start_id, l22_temp);
cudaThreadSynchronize();
threads_per_block = 512;
number_of_blocks = upit((dim - b - start_id) * (dim - b - start_id), threads_per_block);
offseted_elementwise_subtraction<<<number_of_blocks, threads_per_block >>>(l22_temp, dim - b - start_id, M, dim, b, start_id);
cudaThreadSynchronize();
start_id += b;
}
// Fire a kernel for making upper-triangular as 0.0
threads_per_block = 512;
number_of_blocks = upit( (dim * dim), threads_per_block);
set_upper_zero<<<number_of_blocks, threads_per_block>>>(M, dim);
cudaThreadSynchronize();
endtime = CycleTimer::currentSeconds();
printf("Totat time taken = %lf s\n", endtime - startime);
// Now checking!
double *finalans = new double[dim * dim];
cudacall(cudaMemcpy(finalans, M, sizeof(double) * dim * dim, cudaMemcpyDeviceToHost));
check_cholesky(finalans, orig_sym, dim);
/*for(int i = 0; i < dim ; i++){
for(int j = 0; j < dim ; j++){
printf("%lf ", finalans[i*dim + j]);
}
printf("\n");
}*/
}
|
289805a88fd32183ef0d34c7ba814aefc1aa17fe.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/cuml.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuml/tsa/arima_common.h>
#include <cuml/tsa/batched_arima.hpp>
#include <raft/random/rng.cuh>
#include <raft/cudart_utils.h>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace Arima {
struct ArimaParams {
TimeSeriesParams data;
ARIMAOrder order;
};
template <typename DataT>
class ArimaLoglikelihood : public TsFixtureRandom<DataT> {
public:
ArimaLoglikelihood(const std::string& name, const ArimaParams& p)
: TsFixtureRandom<DataT>(name, p.data), order(p.order) {}
// Note: public function because of the __device__ lambda
void runBenchmark(::benchmark::State& state) override {
using MLCommon::Bench::CudaEventTimer;
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto counting = thrust::make_counting_iterator(0);
// Generate random parameters
int N = order.complexity();
raft::random::Rng gpu_gen(this->params.seed, raft::random::GenPhilox);
gpu_gen.uniform(param, N * this->params.batch_size, -1.0, 1.0, stream);
// Set sigma2 parameters to 1.0
DataT* x = param; // copy the object attribute for thrust
thrust::for_each(thrust::hip::par.on(stream), counting,
counting + this->params.batch_size,
[=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });
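// Each batch member owns N contiguous parameters, so index (bid + 1) * N - 1 is the last
// parameter of batch member bid -- the sigma2 slot that the comment above refers to.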
CUDA_CHECK(hipStreamSynchronize(stream));
// Benchmark loop
this->loopOnState(state, [this]() {
// Evaluate log-likelihood
batched_loglike(*this->handle, this->data.X, this->params.batch_size,
this->params.n_obs, order, param, loglike, residual, true,
false);
});
}
void allocateBuffers(const ::benchmark::State& state) {
Fixture::allocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
// Buffer for the model parameters
param = (DataT*)allocator->allocate(
order.complexity() * this->params.batch_size * sizeof(DataT), stream);
// Buffers for the log-likelihood and residuals
loglike = (DataT*)allocator->allocate(
this->params.batch_size * sizeof(DataT), stream);
residual = (DataT*)allocator->allocate(
this->params.batch_size * this->params.n_obs * sizeof(DataT), stream);
}
void deallocateBuffers(const ::benchmark::State& state) {
Fixture::deallocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
allocator->deallocate(
param, order.complexity() * this->params.batch_size * sizeof(DataT),
stream);
allocator->deallocate(loglike, this->params.batch_size * sizeof(DataT),
stream);
allocator->deallocate(
residual, this->params.batch_size * this->params.n_obs * sizeof(DataT),
stream);
}
protected:
ARIMAOrder order;
DataT* param;
DataT* loglike;
DataT* residual;
};
std::vector<ArimaParams> getInputs() {
struct std::vector<ArimaParams> out;
ArimaParams p;
p.data.seed = 12345ULL;
std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 4, 0},
{1, 1, 1, 1, 1, 1, 12, 0},
{1, 1, 1, 1, 1, 1, 24, 0},
{1, 1, 1, 1, 1, 1, 52, 0}};
std::vector<int> list_batch_size = {10, 100, 1000, 10000};
std::vector<int> list_n_obs = {200, 500, 1000};
for (auto& order : list_order) {
for (auto& batch_size : list_batch_size) {
for (auto& n_obs : list_n_obs) {
p.order = order;
p.data.batch_size = batch_size;
p.data.n_obs = n_obs;
out.push_back(p);
}
}
}
return out;
}
ML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima",
getInputs());
} // namespace Arima
} // namespace Bench
} // namespace ML
| 289805a88fd32183ef0d34c7ba814aefc1aa17fe.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/cuml.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuml/tsa/arima_common.h>
#include <cuml/tsa/batched_arima.hpp>
#include <raft/random/rng.cuh>
#include <raft/cudart_utils.h>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace Arima {
struct ArimaParams {
TimeSeriesParams data;
ARIMAOrder order;
};
template <typename DataT>
class ArimaLoglikelihood : public TsFixtureRandom<DataT> {
public:
ArimaLoglikelihood(const std::string& name, const ArimaParams& p)
: TsFixtureRandom<DataT>(name, p.data), order(p.order) {}
// Note: public function because of the __device__ lambda
void runBenchmark(::benchmark::State& state) override {
using MLCommon::Bench::CudaEventTimer;
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto counting = thrust::make_counting_iterator(0);
// Generate random parameters
int N = order.complexity();
raft::random::Rng gpu_gen(this->params.seed, raft::random::GenPhilox);
gpu_gen.uniform(param, N * this->params.batch_size, -1.0, 1.0, stream);
// Set sigma2 parameters to 1.0
DataT* x = param; // copy the object attribute for thrust
thrust::for_each(thrust::cuda::par.on(stream), counting,
counting + this->params.batch_size,
[=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });
CUDA_CHECK(cudaStreamSynchronize(stream));
// Benchmark loop
this->loopOnState(state, [this]() {
// Evaluate log-likelihood
batched_loglike(*this->handle, this->data.X, this->params.batch_size,
this->params.n_obs, order, param, loglike, residual, true,
false);
});
}
void allocateBuffers(const ::benchmark::State& state) {
Fixture::allocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
// Buffer for the model parameters
param = (DataT*)allocator->allocate(
order.complexity() * this->params.batch_size * sizeof(DataT), stream);
// Buffers for the log-likelihood and residuals
loglike = (DataT*)allocator->allocate(
this->params.batch_size * sizeof(DataT), stream);
residual = (DataT*)allocator->allocate(
this->params.batch_size * this->params.n_obs * sizeof(DataT), stream);
}
void deallocateBuffers(const ::benchmark::State& state) {
Fixture::deallocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
allocator->deallocate(
param, order.complexity() * this->params.batch_size * sizeof(DataT),
stream);
allocator->deallocate(loglike, this->params.batch_size * sizeof(DataT),
stream);
allocator->deallocate(
residual, this->params.batch_size * this->params.n_obs * sizeof(DataT),
stream);
}
protected:
ARIMAOrder order;
DataT* param;
DataT* loglike;
DataT* residual;
};
std::vector<ArimaParams> getInputs() {
struct std::vector<ArimaParams> out;
ArimaParams p;
p.data.seed = 12345ULL;
std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 4, 0},
{1, 1, 1, 1, 1, 1, 12, 0},
{1, 1, 1, 1, 1, 1, 24, 0},
{1, 1, 1, 1, 1, 1, 52, 0}};
std::vector<int> list_batch_size = {10, 100, 1000, 10000};
std::vector<int> list_n_obs = {200, 500, 1000};
for (auto& order : list_order) {
for (auto& batch_size : list_batch_size) {
for (auto& n_obs : list_n_obs) {
p.order = order;
p.data.batch_size = batch_size;
p.data.n_obs = n_obs;
out.push_back(p);
}
}
}
return out;
}
ML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima",
getInputs());
} // namespace Arima
} // namespace Bench
} // namespace ML
|
4cce58bdb850c032edbe47b4740a2ed596958ddb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transpose_uint32_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint32_t *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
uint32_t *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int src_h = 1;
int src_w = 1;
int src_align = 1;
int dst_align = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
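// The two loops above round iXSIZE / iYSIZE up to the next multiple of the block size,
// equivalent to ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX, so the grid below covers the
// whole matrix.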
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
transpose_uint32_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,src_h,src_w,src_align,dst_align);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
transpose_uint32_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,src_h,src_w,src_align,dst_align);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
transpose_uint32_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,src_h,src_w,src_align,dst_align);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4cce58bdb850c032edbe47b4740a2ed596958ddb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transpose_uint32_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint32_t *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
uint32_t *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int src_h = 1;
int src_w = 1;
int src_align = 1;
int dst_align = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transpose_uint32_kernel<<<gridBlock,threadBlock>>>(src,dst,src_h,src_w,src_align,dst_align);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transpose_uint32_kernel<<<gridBlock,threadBlock>>>(src,dst,src_h,src_w,src_align,dst_align);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transpose_uint32_kernel<<<gridBlock,threadBlock>>>(src,dst,src_h,src_w,src_align,dst_align);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f39bde268aa73f886a76bec831b47924c442a04a.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define neighbors8 1
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
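// (Holding two velocity buffers lets every thread read its neighbors' velocities from the
// previous step in vel1 while writing the new ones into vel2; updating a single buffer in
// place would mix old and new values depending on thread scheduling.)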
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_shuffledPos;
glm::vec3 *dev_shuffledVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids at random positions scaled to the simulation area.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
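// With gridCellWidth set to twice the largest rule distance, a boid's entire neighborhood fits
// inside a 2x2x2 block of cells, which is why the neighbors8 search path below only has to
// visit 8 cells.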
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_shuffledPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffledPos failed!");
hipMalloc((void**)&dev_shuffledVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffledVel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 thisBoidPos = pos[iSelf];
glm::vec3 thisBoidVel = vel[iSelf];
glm::vec3 center = glm::vec3(0.0f);
glm::vec3 seperate = glm::vec3(0.0f);
glm::vec3 cohesion = glm::vec3(0.0f);
int neighborCountRule1 = 0;
int neighborCountRule3 = 0;
for (int j = 0; j < N; j++) {
if (j == iSelf) {
continue;
}
glm::vec3 thatBoidPos = pos[j];
glm::vec3 thatBoidVel = vel[j];
float dist = glm::distance(thisBoidPos, thatBoidPos);
if (dist < rule1Distance) {
center += thatBoidPos;
neighborCountRule1++;
}
if (dist < rule2Distance) {
seperate -= thatBoidPos - thisBoidPos;
}
if (dist < rule3Distance) {
cohesion += thatBoidVel;
neighborCountRule3++;
}
}
if (neighborCountRule1 > 0) {
center /= neighborCountRule1;
thisBoidVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCountRule3 > 0) {
cohesion /= neighborCountRule3;
thisBoidVel += cohesion * rule3Scale;
}
thisBoidVel += seperate * rule2Scale;
return thisBoidVel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the three flocking rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 velocityChange = computeVelocityChange(N, index, pos, vel1);
glm::vec3 vel2New = vel1[index] + velocityChange;
// Clamp the speed
vel2New = glm::length(vel2New) > maxSpeed ? maxSpeed * glm::normalize(vel2New) : vel2New;
//vel2New = glm::vec3(glm::clamp(vel2New.x, -maxSpeed, maxSpeed),
// glm::clamp(vel2New.y, -maxSpeed, maxSpeed),
// glm::clamp(vel2New.z, -maxSpeed, maxSpeed));
// Record the new velocity into vel2. Question: why NOT vel1?
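// (Writing into vel2 keeps vel1 read-only for this step: other threads are still reading their
// neighbors' old velocities out of vel1, so overwriting it here would be a race.)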
vel2[index] = vel2New;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
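// Since x is the fastest-varying term in the index above, iterating neighbor cells with z as the
// outer loop and x as the inner loop walks through consecutive 1D cell indices, which is the most
// cache-friendly order for the start/end index arrays.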
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
indices[index] = index;
glm::vec3 gridIndex = (pos[index] - gridMin) * inverseCellWidth;
gridIndices[index] = gridIndex3Dto1D(gridIndex.x, gridIndex.y, gridIndex.z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int gridIndex = particleGridIndices[index];
if (index == 0) {
gridCellStartIndices[gridIndex] = 0;
return;
}
else if (index == N - 1) {
gridCellEndIndices[gridIndex] = N;
}
if (particleGridIndices[index] != particleGridIndices[index - 1]) {
gridCellStartIndices[gridIndex] = index;
// the end index is exclusive: it is not included when iterating a cell's boids.
gridCellEndIndices[particleGridIndices[index - 1]] = index;
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
glm::vec3 curPos = pos[index];
glm::vec3 cellPos = (curPos - gridMin) * inverseCellWidth;
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
//float maxdist = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
//glm::ivec3 neighborcellstart = (curPos - maxdist - gridMin) * inverseCellWidth;
//glm::ivec3 neighborcellend = (curPos + maxdist - gridMin) * inverseCellWidth;
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
glm::vec3 thisBoidPos = pos[index];
glm::vec3 thisBoidVel = vel1[index];
glm::vec3 center = glm::vec3(0.0f);
glm::vec3 seperate = glm::vec3(0.0f);
glm::vec3 cohesion = glm::vec3(0.0f);
int neighborCountRule1 = 0;
int neighborCountRule3 = 0;
int xStart = -1, xEnd = 1;
int yStart = -1, yEnd = 1;
int zStart = -1, zEnd = 1;
#if neighbors8
glm::vec3 cellPosFloor = cellPos - glm::floor(cellPos);
if (cellPosFloor.x < 0.5f) {
xEnd = 0;
}
else {
xStart = 0;
}
if (cellPosFloor.y < 0.5f) {
yEnd = 0;
}
else {
yStart = 0;
}
if (cellPosFloor.z < 0.5f) {
zEnd = 0;
}
else {
zStart = 0;
}
#endif
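// The branch above picks, per axis, the neighboring cell on the side of the cell center where the
// boid actually sits; because each cell is two neighborhood radii wide, the neighborhood can only
// spill into that adjacent cell, so at most 2 cells per axis (8 total) need to be searched
// instead of 27.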
for (int gridz = cellPos.z + zStart; gridz <= cellPos.z + zEnd; gridz++) {
for (int gridy = cellPos.y + yStart; gridy <= cellPos.y + yEnd; gridy++) {
for (int gridx = cellPos.x + xStart; gridx <= cellPos.x + xEnd; gridx++) {
// clamp the neighbor cell coordinates to the valid grid range
int x = imax(gridx, 0);
int y = imax(gridy, 0);
int z = imax(gridz, 0);
x = imin(x, gridResolution - 1);
y = imin(y, gridResolution - 1);
z = imin(z, gridResolution - 1);
//for (int z = neighborcellstart.z; z <= neighborcellend.z; z++) {
// for (int y = neighborcellstart.y; y <= neighborcellend.y; y++) {
// for (int x = neighborcellstart.x; x <= neighborcellend.x; x++) {
int neighborGridInd = gridIndex3Dto1D(x, y, z, gridResolution);
int startInd = gridCellStartIndices[neighborGridInd];
int endInd = gridCellEndIndices[neighborGridInd];
for (int j = startInd; j < endInd; j++) {
int i = particleArrayIndices[j];
if (i == index) {
continue;
}
glm::vec3 thatBoidPos = pos[i];
glm::vec3 thatBoidVel = vel1[i];
float dist = glm::distance(thisBoidPos, thatBoidPos);
if (dist < rule1Distance) {
center += thatBoidPos;
neighborCountRule1++;
}
if (dist < rule2Distance) {
seperate -= thatBoidPos - thisBoidPos;
}
if (dist < rule3Distance) {
cohesion += thatBoidVel;
neighborCountRule3++;
}
}
}
}
}
if (neighborCountRule1 > 0) {
center /= neighborCountRule1;
thisBoidVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCountRule3 > 0) {
cohesion /= neighborCountRule3;
thisBoidVel += cohesion * rule3Scale;
}
thisBoidVel += seperate * rule2Scale;
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 vel2New = vel1[index] + thisBoidVel;
vel2New = glm::length(vel2New) > maxSpeed ? maxSpeed * glm::normalize(vel2New) : vel2New;
vel2[index] = vel2New;
}
__global__ void kernReshufflePosVel(int N, glm::vec3 *pos, glm::vec3 *vel, glm::vec3 *shuffledPos, glm::vec3 *shuffledVel, int *particleArrayIndices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int sortedInd = particleArrayIndices[index];
shuffledPos[index] = pos[sortedInd];
shuffledVel[index] = vel[sortedInd];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 curPos = pos[index];
glm::vec3 cellPos = (curPos - gridMin) * inverseCellWidth;
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
//float maxDist = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
//glm::ivec3 neighborCellStart = (curPos - maxDist - gridMin) * inverseCellWidth;
//glm::ivec3 neighborCellEnd = (curPos + maxDist - gridMin) * inverseCellWidth;
glm::vec3 thisBoidPos = pos[index];
glm::vec3 thisBoidVel = vel1[index];
glm::vec3 center = glm::vec3(0.0f);
glm::vec3 seperate = glm::vec3(0.0f);
glm::vec3 cohesion = glm::vec3(0.0f);
int neighborCountRule1 = 0;
int neighborCountRule3 = 0;
int xStart = -1, xEnd = 1;
int yStart = -1, yEnd = 1;
int zStart = -1, zEnd = 1;
#if neighbors8
glm::vec3 cellPosFloor = cellPos - glm::floor(cellPos);
if (cellPosFloor.x < 0.5f) {
xEnd = 0;
}
else {
xStart = 0;
}
if (cellPosFloor.y < 0.5f) {
yEnd = 0;
}
else {
yStart = 0;
}
if (cellPosFloor.z < 0.5f) {
zEnd = 0;
}
else {
zStart = 0;
}
#endif
for (int gridz = cellPos.z + zStart; gridz <= cellPos.z + zEnd; gridz++) {
for (int gridy = cellPos.y + yStart; gridy <= cellPos.y + yEnd; gridy++) {
for (int gridx = cellPos.x + xStart; gridx <= cellPos.x + xEnd; gridx++) {
// clamp the neighbor cell coordinates to the valid grid range
int x = imax(gridx, 0);
int y = imax(gridy, 0);
int z = imax(gridz, 0);
x = imin(x, gridResolution - 1);
y = imin(y, gridResolution - 1);
z = imin(z, gridResolution - 1);
int neighborGridInd = gridIndex3Dto1D(x, y, z, gridResolution);
int startInd = gridCellStartIndices[neighborGridInd];
int endInd = gridCellEndIndices[neighborGridInd];
for (int j = startInd; j < endInd; j++) {
if (j == index) {
continue;
}
glm::vec3 thatBoidPos = pos[j];
glm::vec3 thatBoidVel = vel1[j];
float dist = glm::distance(thisBoidPos, thatBoidPos);
if (dist < rule1Distance) {
center += thatBoidPos;
neighborCountRule1++;
}
if (dist < rule2Distance) {
seperate -= thatBoidPos - thisBoidPos;
}
if (dist < rule3Distance) {
cohesion += thatBoidVel;
neighborCountRule3++;
}
}
}
}
}
if (neighborCountRule1 > 0) {
center /= neighborCountRule1;
thisBoidVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCountRule3 > 0) {
cohesion /= neighborCountRule3;
thisBoidVel += cohesion * rule3Scale;
}
thisBoidVel += seperate * rule2Scale;
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 vel2New = vel1[index] + thisBoidVel;
vel2New = glm::length(vel2New) > maxSpeed ? maxSpeed * glm::normalize(vel2New) : vel2New;
vel2[index] = vel2New;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 fullBlocksPerCell((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
//kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
//checkCUDAErrorWithLine("kernResetIntBuffer start array failed!");
//kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
//checkCUDAErrorWithLine("kernResetIntBuffer end array failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 fullBlocksPerCell((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer start array failed!");
kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer end array failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernReshufflePosVel << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_shuffledPos, dev_shuffledVel, dev_particleArrayIndices);
checkCUDAErrorWithLine("kernReshufflePosVel failed!");
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_shuffledPos, dev_shuffledVel, dev_vel1);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_shuffledPos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_pos, dev_shuffledPos);
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleGridIndices);
hipFree(dev_particleArrayIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_shuffledPos);
hipFree(dev_shuffledVel);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| f39bde268aa73f886a76bec831b47924c442a04a.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define neighbors8 1
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_shuffledPos;
glm::vec3 *dev_shuffledVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&dev_shuffledPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffledPos failed!");
cudaMalloc((void**)&dev_shuffledVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffledVel failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 thisBoidPos = pos[iSelf];
glm::vec3 thisBoidVel = vel[iSelf];
glm::vec3 center = glm::vec3(0.0f);
glm::vec3 seperate = glm::vec3(0.0f);
glm::vec3 cohesion = glm::vec3(0.0f);
int neighborCountRule1 = 0;
int neighborCountRule3 = 0;
for (int j = 0; j < N; j++) {
if (j == iSelf) {
continue;
}
glm::vec3 thatBoidPos = pos[j];
glm::vec3 thatBoidVel = vel[j];
float dist = glm::distance(thisBoidPos, thatBoidPos);
if (dist < rule1Distance) {
center += thatBoidPos;
neighborCountRule1++;
}
if (dist < rule2Distance) {
seperate -= thatBoidPos - thisBoidPos;
}
if (dist < rule3Distance) {
cohesion += thatBoidVel;
neighborCountRule3++;
}
}
if (neighborCountRule1 > 0) {
center /= neighborCountRule1;
thisBoidVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCountRule3 > 0) {
cohesion /= neighborCountRule3;
thisBoidVel += cohesion * rule3Scale;
}
thisBoidVel += seperate * rule2Scale;
return thisBoidVel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the three flocking rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 velocityChange = computeVelocityChange(N, index, pos, vel1);
glm::vec3 vel2New = vel1[index] + velocityChange;
// Clamp the speed
vel2New = glm::length(vel2New) > maxSpeed ? maxSpeed * glm::normalize(vel2New) : vel2New;
//vel2New = glm::vec3(glm::clamp(vel2New.x, -maxSpeed, maxSpeed),
// glm::clamp(vel2New.y, -maxSpeed, maxSpeed),
// glm::clamp(vel2New.z, -maxSpeed, maxSpeed));
// Record the new velocity into vel2. Question: why NOT vel1?
vel2[index] = vel2New;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
indices[index] = index;
glm::vec3 gridIndex = (pos[index] - gridMin) * inverseCellWidth;
gridIndices[index] = gridIndex3Dto1D(gridIndex.x, gridIndex.y, gridIndex.z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int gridIndex = particleGridIndices[index];
if (index == 0) {
gridCellStartIndices[gridIndex] = 0;
return;
}
else if (index == N - 1) {
gridCellEndIndices[gridIndex] = N;
}
if (particleGridIndices[index] != particleGridIndices[index - 1]) {
gridCellStartIndices[gridIndex] = index;
// the end index is exclusive: it is not included when iterating a cell's boids.
gridCellEndIndices[particleGridIndices[index - 1]] = index;
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
glm::vec3 curPos = pos[index];
glm::vec3 cellPos = (curPos - gridMin) * inverseCellWidth;
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
//float maxdist = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
//glm::ivec3 neighborcellstart = (curPos - maxdist - gridMin) * inverseCellWidth;
//glm::ivec3 neighborcellend = (curPos + maxdist - gridMin) * inverseCellWidth;
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
glm::vec3 thisBoidPos = pos[index];
glm::vec3 thisBoidVel = vel1[index];
glm::vec3 center = glm::vec3(0.0f);
glm::vec3 seperate = glm::vec3(0.0f);
glm::vec3 cohesion = glm::vec3(0.0f);
int neighborCountRule1 = 0;
int neighborCountRule3 = 0;
int xStart = -1, xEnd = 1;
int yStart = -1, yEnd = 1;
int zStart = -1, zEnd = 1;
#if neighbors8
glm::vec3 cellPosFloor = cellPos - glm::floor(cellPos);
if (cellPosFloor.x < 0.5f) {
xEnd = 0;
}
else {
xStart = 0;
}
if (cellPosFloor.y < 0.5f) {
yEnd = 0;
}
else {
yStart = 0;
}
if (cellPosFloor.z < 0.5f) {
zEnd = 0;
}
else {
zStart = 0;
}
#endif
for (int gridz = cellPos.z + zStart; gridz <= cellPos.z + zEnd; gridz++) {
for (int gridy = cellPos.y + yStart; gridy <= cellPos.y + yEnd; gridy++) {
for (int gridx = cellPos.x + xStart; gridx <= cellPos.x + xEnd; gridx++) {
// Clamp the neighbor cell coordinate to the grid bounds.
int x = imin(imax(gridx, 0), gridResolution - 1);
int y = imin(imax(gridy, 0), gridResolution - 1);
int z = imin(imax(gridz, 0), gridResolution - 1);
//for (int z = neighborcellstart.z; z <= neighborcellend.z; z++) {
// for (int y = neighborcellstart.y; y <= neighborcellend.y; y++) {
// for (int x = neighborcellstart.x; x <= neighborcellend.x; x++) {
int neighborGridInd = gridIndex3Dto1D(x, y, z, gridResolution);
int startInd = gridCellStartIndices[neighborGridInd];
int endInd = gridCellEndIndices[neighborGridInd];
for (int j = startInd; j < endInd; j++) {
int i = particleArrayIndices[j];
if (i == index) {
continue;
}
glm::vec3 thatBoidPos = pos[i];
glm::vec3 thatBoidVel = vel1[i];
float dist = glm::distance(thisBoidPos, thatBoidPos);
if (dist < rule1Distance) {
center += thatBoidPos;
neighborCountRule1++;
}
if (dist < rule2Distance) {
seperate -= thatBoidPos - thisBoidPos;
}
if (dist < rule3Distance) {
cohesion += thatBoidVel;
neighborCountRule3++;
}
}
}
}
}
if (neighborCountRule1 > 0) {
center /= neighborCountRule1;
thisBoidVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCountRule3 > 0) {
cohesion /= neighborCountRule3;
thisBoidVel += cohesion * rule3Scale;
}
thisBoidVel += seperate * rule2Scale;
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 vel2New = vel1[index] + thisBoidVel;
vel2New = glm::length(vel2New) > maxSpeed ? maxSpeed * glm::normalize(vel2New) : vel2New;
vel2[index] = vel2New;
}
__global__ void kernReshufflePosVel(int N, glm::vec3 *pos, glm::vec3 *vel, glm::vec3 *shuffledPos, glm::vec3 *shuffledVel, int *particleArrayIndices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int sortedInd = particleArrayIndices[index];
shuffledPos[index] = pos[sortedInd];
shuffledVel[index] = vel[sortedInd];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 curPos = pos[index];
glm::vec3 cellPos = (curPos - gridMin) * inverseCellWidth;
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
//float maxDist = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
//glm::ivec3 neighborCellStart = (curPos - maxDist - gridMin) * inverseCellWidth;
//glm::ivec3 neighborCellEnd = (curPos + maxDist - gridMin) * inverseCellWidth;
glm::vec3 thisBoidPos = pos[index];
glm::vec3 thisBoidVel = vel1[index];
glm::vec3 center = glm::vec3(0.0f);
glm::vec3 seperate = glm::vec3(0.0f);
glm::vec3 cohesion = glm::vec3(0.0f);
int neighborCountRule1 = 0;
int neighborCountRule3 = 0;
int xStart = -1, xEnd = 1;
int yStart = -1, yEnd = 1;
int zStart = -1, zEnd = 1;
#if neighbors8
glm::vec3 cellPosFloor = cellPos - glm::floor(cellPos);
if (cellPosFloor.x < 0.5f) {
xEnd = 0;
}
else {
xStart = 0;
}
if (cellPosFloor.y < 0.5f) {
yEnd = 0;
}
else {
yStart = 0;
}
if (cellPosFloor.z < 0.5f) {
zEnd = 0;
}
else {
zStart = 0;
}
#endif
for (int gridz = cellPos.z + zStart; gridz <= cellPos.z + zEnd; gridz++) {
for (int gridy = cellPos.y + yStart; gridy <= cellPos.y + yEnd; gridy++) {
for (int gridx = cellPos.x + xStart; gridx <= cellPos.x + xEnd; gridx++) {
// Clamp the neighbor cell coordinate to the grid bounds.
int x = imin(imax(gridx, 0), gridResolution - 1);
int y = imin(imax(gridy, 0), gridResolution - 1);
int z = imin(imax(gridz, 0), gridResolution - 1);
int neighborGridInd = gridIndex3Dto1D(x, y, z, gridResolution);
int startInd = gridCellStartIndices[neighborGridInd];
int endInd = gridCellEndIndices[neighborGridInd];
for (int j = startInd; j < endInd; j++) {
if (j == index) {
continue;
}
glm::vec3 thatBoidPos = pos[j];
glm::vec3 thatBoidVel = vel1[j];
float dist = glm::distance(thisBoidPos, thatBoidPos);
if (dist < rule1Distance) {
center += thatBoidPos;
neighborCountRule1++;
}
if (dist < rule2Distance) {
seperate -= thatBoidPos - thisBoidPos;
}
if (dist < rule3Distance) {
cohesion += thatBoidVel;
neighborCountRule3++;
}
}
}
}
}
if (neighborCountRule1 > 0) {
center /= neighborCountRule1;
thisBoidVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCountRule3 > 0) {
cohesion /= neighborCountRule3;
thisBoidVel += cohesion * rule3Scale;
}
thisBoidVel += seperate * rule2Scale;
// - Clamp the speed change before putting the new speed in vel2
glm::vec3 vel2New = vel1[index] + thisBoidVel;
vel2New = glm::length(vel2New) > maxSpeed ? maxSpeed * glm::normalize(vel2New) : vel2New;
vel2[index] = vel2New;
}
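// Note on the DIFFERENCE comment above (illustration, not from the original code):
// keeping x as the innermost loop makes consecutive iterations visit consecutive 1D
// cell indices. For example, around cell (4, 3, 2) with gridResolution = 10 the
// inner loop touches 3 + 3*10 + 2*100 = 233, then 234, then 235, so the reshuffled
// pos/vel reads for one row of neighbor cells are largely contiguous in memory.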
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 fullBlocksPerCell((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer start array failed!");
kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer end array failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 fullBlocksPerCell((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer start array failed!");
kernResetIntBuffer << <fullBlocksPerCell, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer end array failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernReshufflePosVel << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_shuffledPos, dev_shuffledVel, dev_particleArrayIndices);
checkCUDAErrorWithLine("kernReshufflePosVel failed!");
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_shuffledPos, dev_shuffledVel, dev_vel1);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_shuffledPos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_pos, dev_shuffledPos);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleGridIndices);
cudaFree(dev_particleArrayIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_shuffledPos);
cudaFree(dev_shuffledVel);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
d70ec49bd64fe3653ac799f8b826ccf82d6b33e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
__global__
void NMode32Kernel(const float* A, const int I, const int J, const int S,
const float* B, const int R,
float* C){
int32_t iA = blockIdx.x * blockDim.x + threadIdx.x;
if (iA >= I) {
return;
}
for (int32_t jA = 0; jA < J; jA++) {
int32_t p2 = iA * J + jA;
for (int32_t sA = 0; sA < S; sA++) {
float ts = A[p2 * S + sA];
for (int32_t rB = 0; rB < R; rB++) {
int32_t pB2 = sA * R + rB;
int32_t pC3 = p2 * R + rB;
C[pC3] += ts * B[pB2];
}
}
}
}
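// Summary of the index bookkeeping above (added note, not from the original code):
// the kernel computes a mode-3 tensor-times-matrix product
//   C[i][j][r] += sum_s A[i][j][s] * B[s][r]
// with A stored row-major as I x J x S, B as S x R, C as I x J x R, and one thread
// handling one slice i of A.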
void NMode32KernelLauncher(const float* A, const int I, const int J, const int S,
const float* B, const int R,
float* C) {
hipLaunchKernelGGL(( NMode32Kernel), dim3((I + 255) / 256), dim3(256), 0, 0, A, I, J, S, B, R, C);
hipDeviceSynchronize();
}
#endif
| d70ec49bd64fe3653ac799f8b826ccf82d6b33e9.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
__global__
void NMode32Kernel(const float* A, const int I, const int J, const int S,
const float* B, const int R,
float* C){
int32_t iA = blockIdx.x * blockDim.x + threadIdx.x;
if (iA >= I) {
return;
}
for (int32_t jA = 0; jA < J; jA++) {
int32_t p2 = iA * J + jA;
for (int32_t sA = 0; sA < S; sA++) {
float ts = A[p2 * S + sA];
for (int32_t rB = 0; rB < R; rB++) {
int32_t pB2 = sA * R + rB;
int32_t pC3 = p2 * R + rB;
C[pC3] += ts * B[pB2];
}
}
}
}
void NMode32KernelLauncher(const float* A, const int I, const int J, const int S,
const float* B, const int R,
float* C) {
NMode32Kernel<<<(I + 255) / 256, 256>>>(A, I, J, S, B, R, C);
cudaDeviceSynchronize();
}
#endif
|
454f393b4ab0ca64b86532bca474372a7acf6821.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/clag2z.cu, mixed zc -> ds, Wed Jan 2 14:18:50 2019
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slat2d and zlaset.
*/
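/*
    Worked example of the decomposition above (added illustration, not part of the
    original MAGMA source), with BLK_X = 64 and BLK_Y = 32: for an m = 100 by
    n = 70 matrix, magmablas_slag2d below launches
        grid    = ( ceil(100/64), ceil(70/32) ) = ( 2, 3 )
        threads = ( 64, 1 )
    Block (1,2) then covers rows 64..99 and columns 64..69, and its threads take
    the "partial block-column" branch because iby + BLK_Y = 96 > n.
*/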
__global__
void slag2d_kernel(
int m, int n,
const float *SA, int ldsa,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] ));
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] ));
}
}
}
}
/***************************************************************************//**
Purpose
-------
SLAG2D converts a single-real matrix, SA,
to a double-real matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA SINGLE PRECISION array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_slag2d(
magma_int_t m, magma_int_t n,
magmaFloat_const_ptr SA, magma_int_t ldsa,
magmaDouble_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( slag2d_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, SA, ldsa, A, lda );
}
| 454f393b4ab0ca64b86532bca474372a7acf6821.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/clag2z.cu, mixed zc -> ds, Wed Jan 2 14:18:50 2019
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slat2d and zlaset.
*/
__global__
void slag2d_kernel(
int m, int n,
const float *SA, int ldsa,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] ));
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] ));
}
}
}
}
/***************************************************************************//**
Purpose
-------
SLAG2D converts a single-real matrix, SA,
to a double-real matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA SINGLE PRECISION array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_slag2d(
magma_int_t m, magma_int_t n,
magmaFloat_const_ptr SA, magma_int_t ldsa,
magmaDouble_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
slag2d_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, SA, ldsa, A, lda );
}
|
4d7e3092c832445c7fc9e8c4f6eaf92d589e6277.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,int var_4,int var_5,int var_6,float var_7,float var_8,float* var_9,float* var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
for (int i=0; i < var_4; ++i) {
float tmp_1 = -0.0f;
comp += tmp_1 + +1.7959E5f - -0.0f;
comp += -1.4499E34f + var_7 - coshf(+1.2568E-11f * (+1.5309E-41f - var_8));
for (int i=0; i < var_5; ++i) {
var_9[i] = atan2f(-1.3134E-27f - (+1.0253E-36f / -0.0f * var_11 + var_12), var_13 - (+1.5368E35f - var_14 + -1.3070E-27f));
var_10[i] = var_15 + (-1.7340E28f / (-1.8622E35f * (-1.3255E36f - +1.6146E-37f)));
comp += var_10[i] - var_9[i] - floorf((-1.8950E8f * (+1.8632E34f / var_16)));
comp = -1.4070E-42f - (-1.1692E-42f * +1.1446E-35f * (+0.0f + (var_17 - -1.3687E34f)));
}
for (int i=0; i < var_6; ++i) {
float tmp_2 = +1.2110E-3f;
comp = tmp_2 + -1.9469E-14f / sqrtf(expf(+1.7079E35f));
comp += +1.5617E-41f + var_18 / (var_19 / (+1.3114E-44f + var_20));
comp += (var_21 * -0.0f + (var_22 * (var_23 - +1.9176E-41f / +1.8814E35f)));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
int tmp_5 = atoi(argv[5]);
int tmp_6 = atoi(argv[6]);
int tmp_7 = atoi(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float* tmp_10 = initPointer( atof(argv[10]) );
float* tmp_11 = initPointer( atof(argv[11]) );
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
hipDeviceSynchronize();
return 0;
}
| 4d7e3092c832445c7fc9e8c4f6eaf92d589e6277.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,int var_4,int var_5,int var_6,float var_7,float var_8,float* var_9,float* var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
for (int i=0; i < var_4; ++i) {
float tmp_1 = -0.0f;
comp += tmp_1 + +1.7959E5f - -0.0f;
comp += -1.4499E34f + var_7 - coshf(+1.2568E-11f * (+1.5309E-41f - var_8));
for (int i=0; i < var_5; ++i) {
var_9[i] = atan2f(-1.3134E-27f - (+1.0253E-36f / -0.0f * var_11 + var_12), var_13 - (+1.5368E35f - var_14 + -1.3070E-27f));
var_10[i] = var_15 + (-1.7340E28f / (-1.8622E35f * (-1.3255E36f - +1.6146E-37f)));
comp += var_10[i] - var_9[i] - floorf((-1.8950E8f * (+1.8632E34f / var_16)));
comp = -1.4070E-42f - (-1.1692E-42f * +1.1446E-35f * (+0.0f + (var_17 - -1.3687E34f)));
}
for (int i=0; i < var_6; ++i) {
float tmp_2 = +1.2110E-3f;
comp = tmp_2 + -1.9469E-14f / sqrtf(expf(+1.7079E35f));
comp += +1.5617E-41f + var_18 / (var_19 / (+1.3114E-44f + var_20));
comp += (var_21 * -0.0f + (var_22 * (var_23 - +1.9176E-41f / +1.8814E35f)));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
int tmp_5 = atoi(argv[5]);
int tmp_6 = atoi(argv[6]);
int tmp_7 = atoi(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float* tmp_10 = initPointer( atof(argv[10]) );
float* tmp_11 = initPointer( atof(argv[11]) );
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
cudaDeviceSynchronize();
return 0;
}
|
07cb443cbeb7919387d9c15998287c1f300bb73a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereotgv.h"
__global__ void TgvConvertDisparityToDepthKernel(float *disparity, float baseline,
float focal, int width, int height, int stride, float *depth)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
/*float Z = baseline * focal / disparity[pos];
float X = (ix - width / 2)*Z / focal;
float Y = (iy - height / 2)*Z / focal;
depth[pos] = sqrt(Z * Z + X * X + Y * Y);*/
depth[pos] = baseline * focal / disparity[pos];
}
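// Numeric example of the conversion above (added illustration; the values are
// made up, not from the original code): with baseline = 0.1 m, focal = 700 px and
// disparity = 14 px, depth = 0.1 * 700 / 14 = 5.0 m. The commented-out lines show
// the alternative of returning the Euclidean ray length sqrt(X*X + Y*Y + Z*Z)
// instead of the plain Z.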
void StereoTgv::ConvertDisparityToDepth(float *disparity, float baseline, float focal, int w, int h, int s, float *depth)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvConvertDisparityToDepthKernel << <blocks, threads >> > (disparity, baseline, focal, w, h, s, depth);
}
//*******************
// Masked
//*******************
__global__ void TgvConvertDisparityToDepthMaskedKernel(float *disparity, float* mask, float baseline,
float focal, int width, int height, int stride, float *depth)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if ((iy >= height) || (ix >= width)) return;
int pos = ix + iy * stride;
if (mask[pos] == 0.0f) return;
depth[pos] = baseline * focal / disparity[pos];
}
void StereoTgv::ConvertDisparityToDepthMasked(float *disparity, float* mask, float baseline, float focal,
int w, int h, int s, float *depth)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvConvertDisparityToDepthMaskedKernel << <blocks, threads >> > (disparity, mask, baseline, focal, w, h, s, depth);
} | 07cb443cbeb7919387d9c15998287c1f300bb73a.cu | #include "stereotgv.h"
__global__ void TgvConvertDisparityToDepthKernel(float *disparity, float baseline,
float focal, int width, int height, int stride, float *depth)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
/*float Z = baseline * focal / disparity[pos];
float X = (ix - width / 2)*Z / focal;
float Y = (iy - height / 2)*Z / focal;
depth[pos] = sqrt(Z * Z + X * X + Y * Y);*/
depth[pos] = baseline * focal / disparity[pos];
}
void StereoTgv::ConvertDisparityToDepth(float *disparity, float baseline, float focal, int w, int h, int s, float *depth)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvConvertDisparityToDepthKernel << <blocks, threads >> > (disparity, baseline, focal, w, h, s, depth);
}
//*******************
// Masked
//*******************
__global__ void TgvConvertDisparityToDepthMaskedKernel(float *disparity, float* mask, float baseline,
float focal, int width, int height, int stride, float *depth)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if ((iy >= height) || (ix >= width)) return;
int pos = ix + iy * stride;
if (mask[pos] == 0.0f) return;
depth[pos] = baseline * focal / disparity[pos];
}
void StereoTgv::ConvertDisparityToDepthMasked(float *disparity, float* mask, float baseline, float focal,
int w, int h, int s, float *depth)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvConvertDisparityToDepthMaskedKernel << <blocks, threads >> > (disparity, mask, baseline, focal, w, h, s, depth);
} |
125dd43cde249cbb45427220090737c2eb677f22.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// The hipMemcpy transfer time can be equal to or greater than the CPU compute time.
// Use shared memory to avoid the cost of the data copies.
// Number of elements to process
#define SIZE 1000
// The image gray levels are divided into 16 bins
#define NUM_BIN 16
// Define kernel function.
__global__ void hist_without_atomic(int *device_b, int *device_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int item = device_a[tid];
if (tid < SIZE)
{
device_b[item]++;
}
}
__global__ void hist_with_atomic(int *device_b, int *device_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < SIZE)
{
// Read the input only after the bounds check to avoid an out-of-range access.
int item = device_a[tid];
atomicAdd(&(device_b[item]), 1);
}
}
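// Illustrative sketch (not part of the original file): the comments at the top
// motivate shared memory, but no such kernel is provided here. Assuming the block
// size stays >= NUM_BIN (as in the launch below), each block can accumulate a
// private histogram in shared memory and then merge it into the global bins with
// one atomicAdd per bin per block.
__global__ void hist_with_shared_atomic(int *device_b, int *device_a)
{
    __shared__ int local_bins[NUM_BIN];
    if (threadIdx.x < NUM_BIN)
    {
        local_bins[threadIdx.x] = 0;
    }
    __syncthreads();
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < SIZE)
    {
        atomicAdd(&(local_bins[device_a[tid]]), 1);
    }
    __syncthreads();
    if (threadIdx.x < NUM_BIN)
    {
        atomicAdd(&(device_b[threadIdx.x]), local_bins[threadIdx.x]);
    }
}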
int main(int argc, char **argv)
{
int host_a[SIZE];
for (int i = 0; i < SIZE; ++i)
{
host_a[i] = i % NUM_BIN;
}
int host_b[NUM_BIN];
for (int j = 0; j < NUM_BIN; ++j)
{
host_b[j] = 0;
}
int *device_a, *device_b;
hipMalloc((void**)&device_a, SIZE * sizeof(int));
hipMalloc((void**)&device_b, NUM_BIN * sizeof(int));
hipMemcpy(device_a, host_a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_b, host_b, NUM_BIN * sizeof(int), hipMemcpyHostToDevice);
// hist_without_atomic <<< (SIZE + NUM_BIN - 1) / NUM_BIN, NUM_BIN >>> (device_b, device_a);
hipLaunchKernelGGL(( hist_with_atomic) , dim3((SIZE + NUM_BIN - 1) / NUM_BIN), dim3(NUM_BIN) , 0, 0, device_b, device_a);
hipMemcpy(host_b, device_b, NUM_BIN * sizeof(int), hipMemcpyDeviceToHost);
printf("Histogram using 16 bin without shared Memory is: \n");
for (int i = 0; i < NUM_BIN; ++i)
{
printf("bin %d: count %d\n", i, host_b[i]);
}
hipFree(device_a);
hipFree(device_b);
return 0;
} | 125dd43cde249cbb45427220090737c2eb677f22.cu | #include <stdio.h>
#include <cuda_runtime.h>
// Considering that the cudaMemcpy transfer time can be equal to or greater than the CPU compute time,
// use shared memory to avoid the cost of the data copies.
// Number of elements to process
#define SIZE 1000
// The image gray levels are divided into 16 bins
#define NUM_BIN 16
// Define kernel function.
__global__ void hist_without_atomic(int *device_b, int *device_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int item = device_a[tid];
if (tid < SIZE)
{
device_b[item]++;
}
}
__global__ void hist_with_atomic(int *device_b, int *device_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < SIZE)
{
// Read the input only after the bounds check to avoid an out-of-range access.
int item = device_a[tid];
atomicAdd(&(device_b[item]), 1);
}
}
int main(int argc, char **argv)
{
int host_a[SIZE];
for (int i = 0; i < SIZE; ++i)
{
host_a[i] = i % NUM_BIN;
}
int host_b[NUM_BIN];
for (int j = 0; j < NUM_BIN; ++j)
{
host_b[j] = 0;
}
int *device_a, *device_b;
cudaMalloc((void**)&device_a, SIZE * sizeof(int));
cudaMalloc((void**)&device_b, NUM_BIN * sizeof(int));
cudaMemcpy(device_a, host_a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, NUM_BIN * sizeof(int), cudaMemcpyHostToDevice);
// hist_without_atomic <<< (SIZE + NUM_BIN - 1) / NUM_BIN, NUM_BIN >>> (device_b, device_a);
hist_with_atomic <<< (SIZE + NUM_BIN - 1) / NUM_BIN, NUM_BIN >>> (device_b, device_a);
cudaMemcpy(host_b, device_b, NUM_BIN * sizeof(int), cudaMemcpyDeviceToHost);
printf("Histogram using 16 bin without shared Memory is: \n");
for (int i = 0; i < NUM_BIN; ++i)
{
printf("bin %d: count %d\n", i, host_b[i]);
}
cudaFree(device_a);
cudaFree(device_b);
return 0;
} |
3721f3509e2ffe39fb5df7e4ac06b03a5bed5828.hip | // !!! This is a file automatically generated by hipify!!!
//
//===------------------ GeantX --------------------------------------------===//
//
// Geant Exascale Pilot
//
// For the licensing terms see LICENSE file.
// For the list of contributors see CREDITS file.
// Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved.
//===----------------------------------------------------------------------===//
//
/**
* @file Geant/proxy/src/ProxyElement.cu
* @brief
*/
//===----------------------------------------------------------------------===//
#include "Geant/proxy/ProxyMaterial.cuh"
#include "Geant/proxy/ProxyMaterialTable.cuh"
#include "Geant/proxy/ProxyPhysicalConstants.hpp"
#include "Geant/proxy/ProxyElementUtils.cuh"
namespace geantx {
GEANT_HOST
ProxyMaterial::ProxyMaterial(const char* name, double density,
double z, double a, ProxyMaterialState state)
: fName(name), fState(state)
{
fNumberOfElements = 1;
fState = state;
fDensity = density;
AddElement(new ProxyElement("",z,a),1.0);
ComputeDerivedQuantities();
fState = (fState == kNullState && fDensity > clhep::kGasThreshold) ? kSolid : kGas;
//add to the table
StoreMaterial();
}
GEANT_HOST
ProxyMaterial::ProxyMaterial(const char* name, double density,
int nElements, ProxyMaterialState state)
: fName(name), fState(state)
{
fNumberOfElements = 0;
fDensity = density;
fRadlen = 0;
fIndex = -nElements; // use fIndex as a tempoary counter until this material is filled
fAtomsVector = new int [nElements];
fMassFractionVector = new double [nElements];
fElementVector = new ProxyElement * [nElements];
for(int i = 0 ; i < nElements ; ++i) {
fElementVector[i] = new ProxyElement;
fAtomsVector[i] = 0;
fMassFractionVector[i] = 1.0;
}
fState = (fState == kNullState && fDensity > clhep::kGasThreshold) ? kSolid : kGas;
StoreMaterial();
}
GEANT_HOST
ProxyMaterial::~ProxyMaterial()
{
delete [] fElementVector;
delete [] fMassFractionVector;
delete [] fNumberOfAtomsPerVolume;
}
GEANT_HOST
void ProxyMaterial::StoreMaterial()
{
//add this to the material table
fIndex = ProxyMaterialTable::Instance()->size();
ProxyMaterialTable::Instance()->push_back(this);
}
GEANT_HOST
void ProxyMaterial::Relocate(void* devPtr)
{
#ifdef GEANT_CUDA
char* d_name;
hipMalloc((void **)&(d_name), sizeof(fName));
hipMemcpy(d_name, fName, sizeof(fName), hipMemcpyHostToDevice);
const char* h_name = fName;
fName = d_name;
ProxyElement** h_fElementVector = fElementVector;
size_t vectorSize = 0;
if( fNumberOfElements > 0 ) {
// device pointers in host memory
ProxyElement *vector_d[fNumberOfElements];
for(int i = 0 ; i < fNumberOfElements ; ++i) {
vectorSize += fElementVector[i]->MemorySize();
hipMalloc((void**)&(vector_d[i]), fElementVector[i]->MemorySize());
fElementVector[i]->Relocate(vector_d[i]);
}
// device pointers in device memory
ProxyElement** d_fElementVector;
hipMalloc((void **)&d_fElementVector, vectorSize);
hipMemcpy(d_fElementVector, vector_d, vectorSize, hipMemcpyHostToDevice);
fElementVector = d_fElementVector;
}
hipMemcpy(devPtr, this, MemorySize(), hipMemcpyHostToDevice);
// persistency
fElementVector = h_fElementVector;
fName = h_name;
#endif
}
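// Expected usage of Relocate() above (added sketch; the actual call site lives
// elsewhere in GeantX, and the variable names here are illustrative only):
//
//   ProxyMaterial* d_material = nullptr;
//   hipMalloc((void**)&d_material, h_material->MemorySize());
//   h_material->Relocate(d_material); // deep-copies the object plus its name and element array
//
// i.e. the caller allocates MemorySize() bytes of device memory and Relocate()
// fills it, temporarily swapping the member pointers to their device copies.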
GEANT_HOST
void ProxyMaterial::ComputeDerivedQuantities()
{
double radinv = 0.0 ;
for (size_t i=0 ; i < fNumberOfElements ; ++i) {
double Zi = fElementVector[i]->GetZ();
double Ai = fElementVector[i]->GetA();
fNumberOfAtomsPerVolume[i] = clhep::Avogadro*fDensity*fMassFractionVector[i]/Ai;
fAtomDensity += fNumberOfAtomsPerVolume[i];
fElectronDensity += fNumberOfAtomsPerVolume[i]*Zi;
radinv += fNumberOfAtomsPerVolume[i]*ProxyElementUtils::ComputeLradTsaiFactor(Zi);
}
fRadlen = (radinv <= 0.0 ? DBL_MAX : 1./radinv);
}
GEANT_HOST
void ProxyMaterial::AddElement(ProxyElement *element, int nAtoms)
{
if(nAtoms <= 0) assert(0);
fElementVector[fNumberOfElements] = element;
fAtomsVector[fNumberOfElements] = nAtoms;
++fNumberOfElements;
++fIndex;
//filled by all proposed numbers of elements
if(fIndex == 0) {
double Amol = 0.;
for (int i=0; i < fNumberOfElements; ++i) {
fMassFractionVector[i] = fAtomsVector[i]*fElementVector[i]->GetA();
Amol += fMassFractionVector[i];
}
for (int i=0; i < fNumberOfElements; ++i) {
fMassFractionVector[i] /= Amol;
}
//calcuate derived quantities
ComputeDerivedQuantities();
}
}
GEANT_HOST
void ProxyMaterial::AddElement(ProxyElement *element, double fraction)
{
if(fraction < 0.0 || fraction > 1.0) {
assert(0);
}
fElementVector[fNumberOfElements] = element;
fMassFractionVector[fNumberOfElements] = fraction;
++fNumberOfElements;
++fIndex;
//filled by all proposed numbers of elements
if(fIndex == 0) {
double Amol = 0.;
for (int i=0; i < fNumberOfElements; ++i) {
Amol += fMassFractionVector[i]*fElementVector[i]->GetA();
}
for (int i=0; i < fNumberOfElements; ++i) {
fAtomsVector[i] = std::lrint(fMassFractionVector[i]*Amol/fElementVector[i]->GetA());
}
//calcuate derived quantities
ComputeDerivedQuantities();
}
}
} //namespace geantx
| 3721f3509e2ffe39fb5df7e4ac06b03a5bed5828.cu | //
//===------------------ GeantX --------------------------------------------===//
//
// Geant Exascale Pilot
//
// For the licensing terms see LICENSE file.
// For the list of contributors see CREDITS file.
// Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved.
//===----------------------------------------------------------------------===//
//
/**
* @file Geant/proxy/src/ProxyElement.cu
* @brief
*/
//===----------------------------------------------------------------------===//
#include "Geant/proxy/ProxyMaterial.cuh"
#include "Geant/proxy/ProxyMaterialTable.cuh"
#include "Geant/proxy/ProxyPhysicalConstants.hpp"
#include "Geant/proxy/ProxyElementUtils.cuh"
namespace geantx {
GEANT_HOST
ProxyMaterial::ProxyMaterial(const char* name, double density,
double z, double a, ProxyMaterialState state)
: fName(name), fState(state)
{
fNumberOfElements = 1;
fState = state;
fDensity = density;
AddElement(new ProxyElement("",z,a),1.0);
ComputeDerivedQuantities();
fState = (fState == kNullState && fDensity > clhep::kGasThreshold) ? kSolid : kGas;
//add to the table
StoreMaterial();
}
GEANT_HOST
ProxyMaterial::ProxyMaterial(const char* name, double density,
int nElements, ProxyMaterialState state)
: fName(name), fState(state)
{
fNumberOfElements = 0;
fDensity = density;
fRadlen = 0;
fIndex = -nElements; // use fIndex as a tempoary counter until this material is filled
fAtomsVector = new int [nElements];
fMassFractionVector = new double [nElements];
fElementVector = new ProxyElement * [nElements];
for(int i = 0 ; i < nElements ; ++i) {
fElementVector[i] = new ProxyElement;
fAtomsVector[i] = 0;
fMassFractionVector[i] = 1.0;
}
fState = (fState == kNullState && fDensity > clhep::kGasThreshold) ? kSolid : kGas;
StoreMaterial();
}
GEANT_HOST
ProxyMaterial::~ProxyMaterial()
{
delete [] fElementVector;
delete [] fMassFractionVector;
delete [] fNumberOfAtomsPerVolume;
}
GEANT_HOST
void ProxyMaterial::StoreMaterial()
{
//add this to the material table
fIndex = ProxyMaterialTable::Instance()->size();
ProxyMaterialTable::Instance()->push_back(this);
}
GEANT_HOST
void ProxyMaterial::Relocate(void* devPtr)
{
#ifdef GEANT_CUDA
char* d_name;
cudaMalloc((void **)&(d_name), sizeof(fName));
cudaMemcpy(d_name, fName, sizeof(fName), cudaMemcpyHostToDevice);
const char* h_name = fName;
fName = d_name;
ProxyElement** h_fElementVector = fElementVector;
size_t vectorSize = 0;
if( fNumberOfElements > 0 ) {
// device pointers in host memory
ProxyElement *vector_d[fNumberOfElements];
for(int i = 0 ; i < fNumberOfElements ; ++i) {
vectorSize += fElementVector[i]->MemorySize();
cudaMalloc((void**)&(vector_d[i]), fElementVector[i]->MemorySize());
fElementVector[i]->Relocate(vector_d[i]);
}
// device pointers in device memory
ProxyElement** d_fElementVector;
cudaMalloc((void **)&d_fElementVector, vectorSize);
cudaMemcpy(d_fElementVector, vector_d, vectorSize, cudaMemcpyHostToDevice);
fElementVector = d_fElementVector;
}
cudaMemcpy(devPtr, this, MemorySize(), cudaMemcpyHostToDevice);
// persistency
fElementVector = h_fElementVector;
fName = h_name;
#endif
}
GEANT_HOST
void ProxyMaterial::ComputeDerivedQuantities()
{
double radinv = 0.0 ;
for (size_t i=0 ; i < fNumberOfElements ; ++i) {
double Zi = fElementVector[i]->GetZ();
double Ai = fElementVector[i]->GetA();
fNumberOfAtomsPerVolume[i] = clhep::Avogadro*fDensity*fMassFractionVector[i]/Ai;
fAtomDensity += fNumberOfAtomsPerVolume[i];
fElectronDensity += fNumberOfAtomsPerVolume[i]*Zi;
radinv += fNumberOfAtomsPerVolume[i]*ProxyElementUtils::ComputeLradTsaiFactor(Zi);
}
fRadlen = (radinv <= 0.0 ? DBL_MAX : 1./radinv);
}
GEANT_HOST
void ProxyMaterial::AddElement(ProxyElement *element, int nAtoms)
{
if(nAtoms <= 0) assert(0);
fElementVector[fNumberOfElements] = element;
fAtomsVector[fNumberOfElements] = nAtoms;
++fNumberOfElements;
++fIndex;
//filled by all proposed numbers of elements
if(fIndex == 0) {
double Amol = 0.;
for (int i=0; i < fNumberOfElements; ++i) {
fMassFractionVector[i] = fAtomsVector[i]*fElementVector[i]->GetA();
Amol += fMassFractionVector[i];
}
for (int i=0; i < fNumberOfElements; ++i) {
fMassFractionVector[i] /= Amol;
}
//calcuate derived quantities
ComputeDerivedQuantities();
}
}
GEANT_HOST
void ProxyMaterial::AddElement(ProxyElement *element, double fraction)
{
if(fraction < 0.0 || fraction > 1.0) {
assert(0);
}
fElementVector[fNumberOfElements] = element;
fMassFractionVector[fNumberOfElements] = fraction;
++fNumberOfElements;
++fIndex;
//filled by all proposed numbers of elements
if(fIndex == 0) {
double Amol = 0.;
for (int i=0; i < fNumberOfElements; ++i) {
Amol += fMassFractionVector[i]*fElementVector[i]->GetA();
}
for (int i=0; i < fNumberOfElements; ++i) {
fAtomsVector[i] = std::lrint(fMassFractionVector[i]*Amol/fElementVector[i]->GetA());
}
//calcuate derived quantities
ComputeDerivedQuantities();
}
}
} //namespace geantx
|
5fb9e6c4d4a12fbc19f2c1433578b6c63526bcd5.hip | // !!! This is a file automatically generated by hipify!!!
/* Julia_set_serial.cu
* Created on: Mar 3, 2018
* Julia set code by Abdallah Mohamed
* Other files by EasyBMP (see BSD_(revised)_license.txt)
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "EasyBMP.h"
//Complex number definition
struct Complex { // typedef is not required for C++
float x; // real part is represented on x-axis in output image
float y; // imaginary part is represented by y-axis in output image
};
//Function declarations
__global__ void compute_julia(const char*, int, int, uchar4*);
void save_image(uchar4*, const char*, int, int);
__device__ Complex add(Complex, Complex);
__device__ Complex mul(Complex, Complex);
__device__ float mag(Complex);
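// Worked example of one Julia iteration z = z*z + c using the helpers declared
// above and defined at the end of this file (added illustration, not from the
// original code), for z = 1 + 2i and c = 0.285 + 0.01i:
//   mul(z, z) = (1*1 - 2*2) + (1*2 + 1*2)i = -3 + 4i
//   add(z*z, c) = -2.715 + 4.01i
//   mag(-2.715 + 4.01i) = sqrt(2.715^2 + 4.01^2) ~= 4.84
// compute_julia keeps iterating until mag(z) exceeds `infinity` (20) or
// max_iterations is reached.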
//main function
int main(void) {
char n[] = "test.bmp";
char* name = n;
int N = 3000 * 3000;
dim3 blockSize(32, 32);
// Launch just enough 32x32 blocks to cover the 3000x3000 image.
dim3 gridSize((3000 + blockSize.x - 1) / blockSize.x, (3000 + blockSize.y - 1) / blockSize.y);
uchar4* pixels = (uchar4*)malloc(N * sizeof(uchar4));
uchar4* d_pixels;
hipMalloc(&d_pixels, sizeof(uchar4) * N);
hipLaunchKernelGGL(( compute_julia), dim3(gridSize),dim3(blockSize), 0, 0, name, 3000, 3000,d_pixels); //width x height
hipMemcpy(pixels, d_pixels, N * sizeof(uchar4), hipMemcpyDeviceToHost);
save_image(pixels, name, 3000, 3000);
printf("Finished creating %s.\n", name);
free(pixels);
hipFree(d_pixels);
return 0;
}
// parallel implementation of Julia set
__global__ void compute_julia(const char* filename, int width, int height, uchar4* pixels) {
//create output image
int max_iterations = 400;
int infinity = 20; //used to check if z goes towards infinity
Complex c = { 0.285, 0.01 };
// ***** Size ****: higher w means smaller size
float w = 4;
float h = w * height / width; //preserve aspect ratio
// LIMITS for each pixel
float x_min = -w / 2, y_min = -h / 2;
float x_incr = w / width, y_incr = h / height;
/************Parallized For loop***********************/
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if ((y < height) && (x < width)) {
Complex z;
z.x = x_min + x * x_incr;
z.y = y_min + y * y_incr;
int n = 0;
do {
z = add(mul(z, z), c);
} while (mag(z) < infinity && n++ < max_iterations);
if (n == max_iterations) { // if we reach max_iterations before z reaches infinity, pixel is black
pixels[x + y * width] = { 0,0,0,0 };
}
else { // if z reaches infinity, pixel color is based on how long it takes z to go to infinity
unsigned char hue = (unsigned char)(255 *(sqrt((float)n / max_iterations*2)));
pixels[x + y * width] = { hue,hue,hue,0};
}
}
}
void save_image(uchar4* pixels, const char* filename, int width, int height) {
BMP output;
output.SetSize(width, height);
output.SetBitDepth(24);
// save each pixel to output image
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
uchar4 color = pixels[col + row * width];
output(col, row)->Red = color.x;
output(col, row)->Green = color.y;
output(col, row)->Blue = color.z;
}
}
output.WriteToFile(filename);
}
__device__ Complex add(Complex c1, Complex c2) {
return{ c1.x + c2.x, c1.y + c2.y };
}
__device__ Complex mul(Complex c1, Complex c2) {
return{ c1.x * c2.x - c1.y * c2.y, c1.x * c2.y + c2.x * c1.y };
}
__device__ float mag(Complex c) {
return (float)sqrt((double)(c.x * c.x + c.y * c.y));
} | 5fb9e6c4d4a12fbc19f2c1433578b6c63526bcd5.cu |
/* Julia_set_serial.cu
* Created on: Mar 3, 2018
* Julia set code by Abdallah Mohamed
* Other files by EasyBMP (see BSD_(revised)_license.txt)
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "EasyBMP.h"
//Complex number definition
struct Complex { // typedef is not required for C++
float x; // real part is represented on x-axis in output image
float y; // imaginary part is represented by y-axis in output image
};
//Function declarations
__global__ void compute_julia(const char*, int, int, uchar4*);
void save_image(uchar4*, const char*, int, int);
__device__ Complex add(Complex, Complex);
__device__ Complex mul(Complex, Complex);
__device__ float mag(Complex);
//main function
int main(void) {
char n[] = "test.bmp";
char* name = n;
int N = 3000 * 3000;
dim3 blockSize(32, 32);
// Launch just enough 32x32 blocks to cover the 3000x3000 image.
dim3 gridSize((3000 + blockSize.x - 1) / blockSize.x, (3000 + blockSize.y - 1) / blockSize.y);
uchar4* pixels = (uchar4*)malloc(N * sizeof(uchar4));
uchar4* d_pixels;
cudaMalloc(&d_pixels, sizeof(uchar4) * N);
compute_julia<<<gridSize,blockSize>>>(name, 3000, 3000,d_pixels); //width x height
cudaMemcpy(pixels, d_pixels, N * sizeof(uchar4), cudaMemcpyDeviceToHost);
save_image(pixels, name, 3000, 3000);
printf("Finished creating %s.\n", name);
free(pixels);
cudaFree(d_pixels);
return 0;
}
// parallel implementation of Julia set
__global__ void compute_julia(const char* filename, int width, int height, uchar4* pixels) {
//create output image
int max_iterations = 400;
int infinity = 20; //used to check if z goes towards infinity
Complex c = { 0.285, 0.01 };
// ***** Size ****: higher w means smaller size
float w = 4;
float h = w * height / width; //preserve aspect ratio
// LIMITS for each pixel
float x_min = -w / 2, y_min = -h / 2;
float x_incr = w / width, y_incr = h / height;
/************Parallized For loop***********************/
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if ((y < height) && (x < width)) {
Complex z;
z.x = x_min + x * x_incr;
z.y = y_min + y * y_incr;
int n = 0;
do {
z = add(mul(z, z), c);
} while (mag(z) < infinity && n++ < max_iterations);
if (n == max_iterations) { // if we reach max_iterations before z reaches infinity, pixel is black
pixels[x + y * width] = { 0,0,0,0 };
}
else { // if z reaches infinity, pixel color is based on how long it takes z to go to infinity
unsigned char hue = (unsigned char)(255 *(sqrt((float)n / max_iterations*2)));
pixels[x + y * width] = { hue,hue,hue,0};
}
}
}
void save_image(uchar4* pixels, const char* filename, int width, int height) {
BMP output;
output.SetSize(width, height);
output.SetBitDepth(24);
// save each pixel to output image
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
uchar4 color = pixels[col + row * width];
output(col, row)->Red = color.x;
output(col, row)->Green = color.y;
output(col, row)->Blue = color.z;
}
}
output.WriteToFile(filename);
}
__device__ Complex add(Complex c1, Complex c2) {
return{ c1.x + c2.x, c1.y + c2.y };
}
__device__ Complex mul(Complex c1, Complex c2) {
return{ c1.x * c2.x - c1.y * c2.y, c1.x * c2.y + c2.x * c1.y };
}
__device__ float mag(Complex c) {
return (float)sqrt((double)(c.x * c.x + c.y * c.y));
} |
bd0f60575083b0bf1f26ab8f7b51c12a91a4e0f9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace rf {
struct RegParams {
DatasetParams data;
RegressionParams regression;
RF_params rf;
};
template <typename D>
struct RFRegressorModel {};
template <>
struct RFRegressorModel<float> {
ML::RandomForestRegressorF model;
};
template <>
struct RFRegressorModel<double> {
ML::RandomForestRegressorD model;
};
template <typename D>
class RFRegressor : public RegressionFixture<D> {
public:
RFRegressor(const std::string& name, const RegParams& p)
: RegressionFixture<D>(name, p.data, p.regression), rfParams(p.rf) {}
protected:
void runBenchmark(::benchmark::State& state) override {
using MLCommon::Bench::CudaEventTimer;
if (this->params.rowMajor) {
state.SkipWithError("RFRegressor only supports col-major inputs");
}
this->loopOnState(state, [this]() {
auto* mPtr = &model.model;
mPtr->trees = nullptr;
fit(*this->handle, mPtr, this->data.X, this->params.nrows,
this->params.ncols, this->data.y, rfParams);
CUDA_CHECK(hipStreamSynchronize(this->stream));
});
}
private:
RFRegressorModel<D> model;
RF_params rfParams;
};
template <typename D>
std::vector<RegParams> getInputs() {
struct DimInfo {
int nrows, ncols, n_informative;
};
struct std::vector<RegParams> out;
RegParams p;
p.data.rowMajor = false;
p.regression = {
.shuffle = true, // Better to shuffle when n_informative < ncols
.effective_rank = -1, // dataset generation will be faster
.bias = 4.5,
.tail_strength = 0.5, // unused when effective_rank = -1
.noise = 1.0,
.seed = 12345ULL};
p.rf = set_rf_params(10, /*max_depth */
(1 << 20), /* max_leaves */
0.3, /* max_features */
32, /* n_bins */
1, /* split_algo */
3, /* min_samples_leaf */
3, /* min_samples_split */
0.0f, /* min_impurity_decrease */
true, /* bootstrap_features */
true, /* bootstrap */
500, /* n_trees */
1.f, /* max_samples */
1234ULL, /* seed */
ML::CRITERION::MSE, /* split_criterion */
false, /* quantile_per_tree */
8, /* n_streams */
false, /* use_experimental_backend */
128 /* max_batch_size */
);
std::vector<DimInfo> dim_info = {{500000, 500, 400}};
for (auto& di : dim_info) {
// Let's run Bosch only for float type
if (!std::is_same<D, float>::value && di.ncols == 968) continue;
p.data.nrows = di.nrows;
p.data.ncols = di.ncols;
p.regression.n_informative = di.n_informative;
p.rf.tree_params.max_features = 1.f;
for (auto max_depth : std::vector<int>({7, 11, 15})) {
p.rf.tree_params.max_depth = max_depth;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(RegParams, RFRegressor<float>, "regression",
getInputs<float>());
ML_BENCH_REGISTER(RegParams, RFRegressor<double>, "regression",
getInputs<double>());
} // namespace rf
} // namespace Bench
} // namespace ML
| bd0f60575083b0bf1f26ab8f7b51c12a91a4e0f9.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace rf {
struct RegParams {
DatasetParams data;
RegressionParams regression;
RF_params rf;
};
template <typename D>
struct RFRegressorModel {};
template <>
struct RFRegressorModel<float> {
ML::RandomForestRegressorF model;
};
template <>
struct RFRegressorModel<double> {
ML::RandomForestRegressorD model;
};
template <typename D>
class RFRegressor : public RegressionFixture<D> {
public:
RFRegressor(const std::string& name, const RegParams& p)
: RegressionFixture<D>(name, p.data, p.regression), rfParams(p.rf) {}
protected:
void runBenchmark(::benchmark::State& state) override {
using MLCommon::Bench::CudaEventTimer;
if (this->params.rowMajor) {
state.SkipWithError("RFRegressor only supports col-major inputs");
}
this->loopOnState(state, [this]() {
auto* mPtr = &model.model;
mPtr->trees = nullptr;
fit(*this->handle, mPtr, this->data.X, this->params.nrows,
this->params.ncols, this->data.y, rfParams);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
});
}
private:
RFRegressorModel<D> model;
RF_params rfParams;
};
template <typename D>
std::vector<RegParams> getInputs() {
struct DimInfo {
int nrows, ncols, n_informative;
};
struct std::vector<RegParams> out;
RegParams p;
p.data.rowMajor = false;
p.regression = {
.shuffle = true, // Better to shuffle when n_informative < ncols
.effective_rank = -1, // dataset generation will be faster
.bias = 4.5,
.tail_strength = 0.5, // unused when effective_rank = -1
.noise = 1.0,
.seed = 12345ULL};
p.rf = set_rf_params(10, /*max_depth */
(1 << 20), /* max_leaves */
0.3, /* max_features */
32, /* n_bins */
1, /* split_algo */
3, /* min_samples_leaf */
3, /* min_samples_split */
0.0f, /* min_impurity_decrease */
true, /* bootstrap_features */
true, /* bootstrap */
500, /* n_trees */
1.f, /* max_samples */
1234ULL, /* seed */
ML::CRITERION::MSE, /* split_criterion */
false, /* quantile_per_tree */
8, /* n_streams */
false, /* use_experimental_backend */
128 /* max_batch_size */
);
std::vector<DimInfo> dim_info = {{500000, 500, 400}};
for (auto& di : dim_info) {
// Let's run Bosch only for float type
if (!std::is_same<D, float>::value && di.ncols == 968) continue;
p.data.nrows = di.nrows;
p.data.ncols = di.ncols;
p.regression.n_informative = di.n_informative;
p.rf.tree_params.max_features = 1.f;
for (auto max_depth : std::vector<int>({7, 11, 15})) {
p.rf.tree_params.max_depth = max_depth;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(RegParams, RFRegressor<float>, "regression",
getInputs<float>());
ML_BENCH_REGISTER(RegParams, RFRegressor<double>, "regression",
getInputs<double>());
} // namespace rf
} // namespace Bench
} // namespace ML
|
ff93ae684dfb161ffd83fbb6e157c519c2cd2383.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudafeature/feature-spectral-batched_kernels.cu
//
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens, Levi Barnes
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if HAVE_CUDA == 1
#include <roctracer/roctx.h>
#include <hipcub/hipcub.hpp>
#endif
#include "cudafeat/feature-spectral-batched-kernels.h"
#include "cudafeat/feature-spectral-cuda.h"
#include "cudafeat/lane-desc.h"
#include "cudamatrix/cu-rand.h"
namespace kaldi {
// Mimics the functionality of mel_banks_compute_kernel
// (found in feature-spectral-cuda.cu). The 3rd
// dimension (z) of the block grid gives the hardware
// "lane". lanes tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_mel_banks_compute_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
float energy_floor, int32 *offsets, int32 *sizes, float **vecs,
const float *feats, int32_t ldf, float *mels, int32_t ldm, bool use_log) {
// Specialize WarpReduce for type float
typedef hipcub::WarpReduce<float> WarpReduce;
// Allocate WarpReduce shared memory for 8 warps
__shared__ typename WarpReduce::TempStorage temp_storage[8];
// warp will work together to compute sum
int tid = threadIdx.x;
int wid = threadIdx.y;
// blocks in the x dimension take different bins
int bin = blockIdx.x;
// frame is a combination of blocks in the y dimension and threads in the y
// dimension
int frame = blockIdx.y * blockDim.y + threadIdx.y;
int lane = blockIdx.z;
LaneDesc desc = lanes[lane];
int num_frames = desc.num_chunk_frames;
// TODO get offsets, sizes, and vecs from laneInfo?
int offset = offsets[bin];
int size = sizes[bin];
const float *v = vecs[bin];
const float *w = feats + frame * ldf + lane * max_chunk_frames * ldf + offset;
  // perform local sum
float sum = 0;
if (frame < num_frames) { // exclude frames beyond the end
for (int idx = tid; idx < size; idx += 32) {
sum += v[idx] * w[idx];
}
}
// Sum in cub
sum = WarpReduce(temp_storage[wid]).Sum(sum);
if (tid == 0 && frame < num_frames) {
if (use_log) {
// avoid log of zero
if (sum < energy_floor) sum = energy_floor;
float val = logf(sum);
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = val;
} else {
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = sum;
}
}
}
// Mimics the functionality of apply_lifter_and_floor_energy
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_apply_lifter_and_floor_energy_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int num_cols, float cepstral_lifter, bool use_energy, float energy_floor,
float *log_energy, int32_t ldl, float *lifter_coeffs, float *features,
int32_t ldf) {
int thread_id = threadIdx.x;
int frame = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (frame > desc.num_chunk_frames) return;
float *feats = features + frame * ldf + lane * max_chunk_frames * ldf;
// apply lifter coefficients
if (cepstral_lifter != 0.0f) {
for (int c = thread_id; c < num_cols; c += CU1DBLOCK) {
float lift = lifter_coeffs[c];
float f = feats[c];
feats[c] = f * lift;
}
}
// Thread 0 for each frame will apply energy
if (use_energy && thread_id == 0) {
float energy = log_energy[frame + ldl * lane];
float log_energy_floor = log(energy_floor);
if (energy_floor > 0.0f && energy < log_energy_floor) {
energy = log_energy_floor;
}
feats[0] = energy;
}
}
// Mimics the functionality of process_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_process_window_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int frame_length, float dither, float energy_floor, bool remove_dc_offset,
float preemph_coeff, bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing, float *tmp_windows, int32_t ldt,
float *windows, int32_t ldw) {
  // Specialize BlockReduce for type float
typedef hipcub::BlockReduce<float, CU1DBLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int thread_id = threadIdx.x;
int row = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (row >= desc.num_chunk_frames) return;
float *tmp_window = tmp_windows + row * ldt + lane * max_chunk_frames * ldt;
float *window = windows + row * ldw + lane * max_chunk_frames * ldw;
__shared__ float ssum;
float sum = 0;
float wdot = 0;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
// tmp_window contains optional dither. Apply that on read.
float wval = window[idx];
if (dither != 0.0f) {
wval += tmp_window[idx] * dither;
}
// compute local sum for removing dc offset
sum += wval;
// compute dot product for log energy
wdot += wval * wval;
float windowing_mul = 1;
if (remove_dc_offset == false && preemph_coeff == 0.0f) {
// we are done here so set windowing multiplication on write.
windowing_mul = windowing[idx];
}
// write dithered output
window[idx] = wval * windowing_mul;
}
__syncthreads();
if (remove_dc_offset) {
// we will recompute this below
wdot = 0.0f;
// use cub to reduce
sum = BlockReduce(temp_storage).Sum(sum);
// broadcast sum to entire block
if (thread_id == 0) ssum = sum;
__syncthreads();
sum = -ssum / frame_length;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float windowing_mul = 1;
float *out = window;
if (preemph_coeff == 0.0f) {
// we are done here so apply windowing
windowing_mul = windowing[idx];
} else {
// write to temp window as we will copy back into window
// when doing pre-emphasis
out = tmp_window;
}
// updated window value
float wval = window[idx] + sum;
// compute new dot product with dc offset removed
wdot += wval * wval;
// write output
out[idx] = wval * windowing_mul;
}
}
__syncthreads();
// if pointer is not NULL we will set energy to either
// the computed energy or 0 depending on need_raw_log_energy
if (log_energy_pre_window != NULL) {
float energy = 0.0f;
if (need_raw_log_energy) {
      // must sync before reusing temp_storage
if (remove_dc_offset) __syncthreads();
// use cub to reduce
wdot = BlockReduce(temp_storage).Sum(wdot);
energy = max(wdot, energy_floor);
}
if (thread_id == 0) {
log_energy_pre_window[row + lane * lde] = log(energy);
}
}
// TODO this could be more efficient using shared memory instead of
// tmp_window.
if (preemph_coeff != 0.0f) {
// wait for tmp_window to be computed
__threadfence();
__syncthreads();
// starting thread idx at 0 to keep writes aligned.
    // unaligned reads are less painful than unaligned writes
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float wval = tmp_window[idx];
float prev_window = wval;
if (idx > 0) {
prev_window = tmp_window[idx - 1];
}
// use __fmul_rn to match CPU
// window[idx] = (wval - preemph_coeff*prev_window) * windowing[idx];
window[idx] =
(wval - __fmul_rn(preemph_coeff, prev_window)) * windowing[idx];
}
}
}
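// Returns the index of the first sample of 'frame'. With snip_edges the frame
// starts at frame * frame_shift; otherwise the frame is centered on its
// midpoint, so the start may be negative for the first few frames.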
__host__ __device__ inline int32 FirstSampleOfFrame(int32 frame,
int32 frame_shift,
int32 window_size,
bool snip_edges) {
if (snip_edges) {
return frame * frame_shift;
} else {
int32 midpoint_of_frame = frame_shift * frame + frame_shift / 2,
beginning_of_frame = midpoint_of_frame - window_size / 2;
return beginning_of_frame;
}
}
// Mimics the functionality of extract_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
// Extra samples not processed in this chunk are moved to
// "stash" where they'll be pre-pended to the next chunk
// from this channel
__global__ void batched_extract_window_kernel(
const LaneDesc *lanes, int32_t num_lanes, int32 frame_shift,
int32 frame_length, int32 frame_length_padded, bool snip_edges,
const BaseFloat __restrict__ *wave, int32_t ldw,
BaseFloat *__restrict__ windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
// local frame number
int32_t fidx = blockIdx.x;
int32_t tidx = threadIdx.x;
int32_t lane = blockIdx.y;
const LaneDesc desc = lanes[lane];
ChannelId channel = desc.channel;
// This is the current sample that is pointed to by wave
int32_t current_sample = desc.current_sample;
// current frame we are computing in global space
int32_t current_frame = desc.current_frame;
// global frame number computed by this block
int32_t global_frame = current_frame + fidx;
int32_t num_chunk_samples = desc.num_chunk_samples;
if (fidx > desc.num_chunk_frames) return;
// offset input/output by channels or lanes
stash = stash + channel * lds;
wave = wave + lane * ldw;
BaseFloat *window = windows + fidx * wlda + gridDim.x * lane * wlda;
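  // gridDim.x equals max_chunk_frames (set by the launcher), so each lane's
  // block of windows is strided by gridDim.x * wlda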
// This is the first sample needed to compute this frame
int32_t start_sample =
FirstSampleOfFrame(global_frame, frame_shift, window_size, snip_edges);
// Sample offset is how much we have to offset our index
// into the input wave.
int32_t wave_start = start_sample - current_sample;
// wave_start and wave_end are start and end indexes into 'wave', for the
// piece of wave that we're trying to extract.
int32_t wave_end = wave_start + frame_length;
// wave_start will be negative on successive chunks as we need
// to grab context from stash.
if ((current_frame > 0 || wave_start >= 0) && wave_end <= num_chunk_samples) {
// the normal case-- no edge effects to consider.
for (int i = tidx; i < frame_length; i += blockDim.x) {
int32_t widx = wave_start + i;
BaseFloat val;
if (widx >= 0) {
val = wave[widx];
} else {
// widx is negative. Add it to the stash size
// to get the correct index into the stash
int32_t sidx = ssize + widx;
val = stash[sidx];
}
window[i] = val;
}
} else {
// Deal with any end effects by reflection, if needed. This code will only
// be reached for about two frames per utterance, so we don't concern
// ourselves excessively with efficiency.
for (int s = tidx; s < frame_length; s += blockDim.x) {
int32 s_in_wave = wave_start + s;
while (s_in_wave < 0 || s_in_wave >= num_chunk_samples) {
// reflect around the beginning or end of the wave.
// e.g. -1 -> 0, -2 -> 1.
// dim -> dim - 1, dim + 1 -> dim - 2.
// the code supports repeated reflections, although this
// would only be needed in pathological cases.
if (s_in_wave < 0)
s_in_wave = -s_in_wave - 1;
else
s_in_wave = 2 * num_chunk_samples - 1 - s_in_wave;
}
window[s] = wave[s_in_wave];
}
}
if (frame_length_padded > frame_length) {
for (int i = frame_length + tidx; i < frame_length_padded;
i += blockDim.x) {
window[i] = 0.0f;
}
}
}
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// This is the batched version. The y-dimension of the grid
// gives the lane number
__global__ void batched_dot_log_kernel(int32_t max_chunk_frames,
int32_t frame_length,
float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
  // Specialize BlockReduce for type float
typedef hipcub::BlockReduce<float, CU1DBLOCK> BlockReduce;
  // Allocate BlockReduce shared memory
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t frame = blockIdx.x;
int32_t tid = threadIdx.x;
int32_t lane = blockIdx.y;
float *in = signal_frame + frame * lds + max_chunk_frames * lane * lds;
float sum = 0;
  // perform local dot product
for (int32_t i = tid; i < frame_length; i += blockDim.x) {
float val = in[i];
sum += val * val;
}
// reduce using cub
sum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
signal_log_energy[frame + lane * lde] = logf(sum);
}
}
__global__ void batched_update_stash_kernel(const LaneDesc *lanes,
int32_t num_lanes,
const BaseFloat *wave, int32_t ldw,
BaseFloat *stash, int32_t num_stash,
int32_t lds) {
int32_t lane = blockIdx.x;
LaneDesc desc = lanes[lane];
int32_t channel = desc.channel;
int32_t num_chunk_samples = desc.num_chunk_samples;
// offset memory by lane or channel
wave = wave + lane * ldw;
stash = stash + channel * lds;
int32_t sample_offset = num_chunk_samples - num_stash;
for (int i = threadIdx.x; i < num_stash; i += blockDim.x) {
int32_t idx = sample_offset + i;
float val;
if (idx < 0) {
// data must come from old stash
val = stash[idx + num_stash];
} else {
// data comes from new wave
val = wave[idx];
}
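    // sync so every thread has read its (possibly old-stash) value before any
    // thread overwrites the stash below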
__syncthreads();
stash[i] = val;
}
}
// Each threadblock computes a different row of the matrix.
// Threads in the same block compute the row collaboratively.
// This kernel must be called out of place (A_in!=A_out).
__global__ void power_spectrum_kernel(int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo,
bool use_power) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
const float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
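    // bin 0 (DC) and the Nyquist bin are handled separately by thread 0 below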
float2 val = reinterpret_cast<const float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
if (use_power) {
Aw[idx] = ret;
} else {
Aw[idx] = sqrtf(ret);
}
}
// handle special case
if (threadIdx.x == 0) {
float real = Ar[0];
    // cufft puts this at the end; this differs from what kaldi does in its
    // own internal implementation
float im = Ar[row_length];
if (use_power) {
Aw[0] = real * real;
Aw[half_length] = im * im;
} else {
Aw[0] = fabs(real);
Aw[half_length] = fabs(im);
}
}
}
void cuda_power_spectrum(int32_t max_chunk_frames, int32_t num_lanes,
int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo, bool use_power) {
hipLaunchKernelGGL(( power_spectrum_kernel), dim3(max_chunk_frames * num_lanes), dim3(CU1DBLOCK), 0, 0,
row_length, A_in, ldi, A_out, ldo, use_power);
}
void cuda_mel_banks_compute(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32_t num_bins,
float energy_floor, int32 *offsets, int32 *sizes,
float **vecs, const float *feats, int32_t ldf,
float *mels, int32_t ldm, bool use_log) {
dim3 Bl(32, 8);
dim3 Gr(num_bins, (max_chunk_frames + Bl.y - 1) / Bl.y, num_lanes);
hipLaunchKernelGGL(( batched_mel_banks_compute_kernel), dim3(Gr), dim3(Bl), 0, 0,
lanes, num_lanes, max_chunk_frames, energy_floor, offsets, sizes, vecs,
feats, ldf, mels, ldm, use_log);
}
void cuda_apply_lifter_and_floor_energy(const LaneDesc *lanes,
int32_t num_lanes,
int32_t max_chunk_frames, int num_cols,
float cepstral_lifter, bool use_energy,
float energy_floor, float *log_energy,
int32_t ldl, float *lifter_coeffs,
float *features, int32_t ldf) {
dim3 Gr(max_chunk_frames, num_lanes);
hipLaunchKernelGGL(( batched_apply_lifter_and_floor_energy_kernel), dim3(Gr), dim3(CU1DBLOCK), 0, 0,
lanes, num_lanes, max_chunk_frames, num_cols, cepstral_lifter, use_energy,
energy_floor, log_energy, ldl, lifter_coeffs, features, ldf);
}
void cuda_process_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int frame_length,
float dither, float energy_floor,
bool remove_dc_offset, float preemph_coeff,
bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing,
float *tmp_windows, int32_t ldt, float *windows,
int32_t ldw) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
hipLaunchKernelGGL(( batched_process_window_kernel), dim3(Gr), dim3(Bl), 0, 0,
lanes, num_lanes, max_chunk_frames, frame_length, dither, energy_floor,
remove_dc_offset, preemph_coeff, need_raw_log_energy,
log_energy_pre_window, lde, windowing, tmp_windows, ldt, windows, ldw);
}
void cuda_extract_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32 frame_shift,
int32 frame_length, int32 frame_length_padded,
bool snip_edges, const float *wave, int32_t ldw,
float *windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
hipLaunchKernelGGL(( batched_extract_window_kernel), dim3(Gr), dim3(Bl), 0, 0,
lanes, num_lanes, frame_shift, frame_length, frame_length_padded,
snip_edges, wave, ldw, windows, window_size, wlda, stash, ssize, lds);
}
void cuda_dot_log(int32_t max_chunk_frames, int32_t num_lanes,
int32_t frame_length, float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
dim3 Gr(max_chunk_frames, num_lanes);
hipLaunchKernelGGL(( batched_dot_log_kernel), dim3(Gr), dim3(CU1DBLOCK), 0, 0, max_chunk_frames, frame_length,
signal_frame, lds,
signal_log_energy, lde);
}
void cuda_update_stash(const LaneDesc *lanes, int32_t num_lanes,
const BaseFloat *wave, int32_t ldw, BaseFloat *stash,
int32_t num_stash, int32_t lds) {
int Gr = num_lanes;
int Bl = 1024;
hipLaunchKernelGGL(( batched_update_stash_kernel), dim3(Gr), dim3(Bl), 0, 0, lanes, num_lanes, wave, ldw, stash,
num_stash, lds);
}
} // namespace kaldi
| ff93ae684dfb161ffd83fbb6e157c519c2cd2383.cu | // cudafeature/feature-spectral-batched_kernels.cu
//
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens, Levi Barnes
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if HAVE_CUDA == 1
#include <nvToolsExt.h>
#include <cub/cub.cuh>
#endif
#include "cudafeat/feature-spectral-batched-kernels.h"
#include "cudafeat/feature-spectral-cuda.h"
#include "cudafeat/lane-desc.h"
#include "cudamatrix/cu-rand.h"
namespace kaldi {
// Mimics the functionality of mel_banks_compute_kernel
// (found in feature-spectral-cuda.cu). The 3rd
// dimension (z) of the block grid gives the hardware
// "lane". lanes tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_mel_banks_compute_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
float energy_floor, int32 *offsets, int32 *sizes, float **vecs,
const float *feats, int32_t ldf, float *mels, int32_t ldm, bool use_log) {
// Specialize WarpReduce for type float
typedef cub::WarpReduce<float> WarpReduce;
// Allocate WarpReduce shared memory for 8 warps
__shared__ typename WarpReduce::TempStorage temp_storage[8];
// warp will work together to compute sum
int tid = threadIdx.x;
int wid = threadIdx.y;
// blocks in the x dimension take different bins
int bin = blockIdx.x;
// frame is a combination of blocks in the y dimension and threads in the y
// dimension
int frame = blockIdx.y * blockDim.y + threadIdx.y;
int lane = blockIdx.z;
LaneDesc desc = lanes[lane];
int num_frames = desc.num_chunk_frames;
// TODO get offsets, sizes, and vecs from laneInfo?
int offset = offsets[bin];
int size = sizes[bin];
const float *v = vecs[bin];
const float *w = feats + frame * ldf + lane * max_chunk_frames * ldf + offset;
  // perform local sum
float sum = 0;
if (frame < num_frames) { // exclude frames beyond the end
for (int idx = tid; idx < size; idx += 32) {
sum += v[idx] * w[idx];
}
}
// Sum in cub
sum = WarpReduce(temp_storage[wid]).Sum(sum);
if (tid == 0 && frame < num_frames) {
if (use_log) {
// avoid log of zero
if (sum < energy_floor) sum = energy_floor;
float val = logf(sum);
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = val;
} else {
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = sum;
}
}
}
// Mimics the functionality of apply_lifter_and_floor_energy
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_apply_lifter_and_floor_energy_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int num_cols, float cepstral_lifter, bool use_energy, float energy_floor,
float *log_energy, int32_t ldl, float *lifter_coeffs, float *features,
int32_t ldf) {
int thread_id = threadIdx.x;
int frame = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (frame > desc.num_chunk_frames) return;
float *feats = features + frame * ldf + lane * max_chunk_frames * ldf;
// apply lifter coefficients
if (cepstral_lifter != 0.0f) {
for (int c = thread_id; c < num_cols; c += CU1DBLOCK) {
float lift = lifter_coeffs[c];
float f = feats[c];
feats[c] = f * lift;
}
}
// Thread 0 for each frame will apply energy
if (use_energy && thread_id == 0) {
float energy = log_energy[frame + ldl * lane];
float log_energy_floor = log(energy_floor);
if (energy_floor > 0.0f && energy < log_energy_floor) {
energy = log_energy_floor;
}
feats[0] = energy;
}
}
// Mimics the functionality of process_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_process_window_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int frame_length, float dither, float energy_floor, bool remove_dc_offset,
float preemph_coeff, bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing, float *tmp_windows, int32_t ldt,
float *windows, int32_t ldw) {
  // Specialize BlockReduce for type float
typedef cub::BlockReduce<float, CU1DBLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int thread_id = threadIdx.x;
int row = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (row >= desc.num_chunk_frames) return;
float *tmp_window = tmp_windows + row * ldt + lane * max_chunk_frames * ldt;
float *window = windows + row * ldw + lane * max_chunk_frames * ldw;
__shared__ float ssum;
float sum = 0;
float wdot = 0;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
// tmp_window contains optional dither. Apply that on read.
float wval = window[idx];
if (dither != 0.0f) {
wval += tmp_window[idx] * dither;
}
// compute local sum for removing dc offset
sum += wval;
// compute dot product for log energy
wdot += wval * wval;
float windowing_mul = 1;
if (remove_dc_offset == false && preemph_coeff == 0.0f) {
// we are done here so set windowing multiplication on write.
windowing_mul = windowing[idx];
}
// write dithered output
window[idx] = wval * windowing_mul;
}
__syncthreads();
if (remove_dc_offset) {
// we will recompute this below
wdot = 0.0f;
// use cub to reduce
sum = BlockReduce(temp_storage).Sum(sum);
// broadcast sum to entire block
if (thread_id == 0) ssum = sum;
__syncthreads();
sum = -ssum / frame_length;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float windowing_mul = 1;
float *out = window;
if (preemph_coeff == 0.0f) {
// we are done here so apply windowing
windowing_mul = windowing[idx];
} else {
// write to temp window as we will copy back into window
// when doing pre-emphasis
out = tmp_window;
}
// updated window value
float wval = window[idx] + sum;
// compute new dot product with dc offset removed
wdot += wval * wval;
// write output
out[idx] = wval * windowing_mul;
}
}
__syncthreads();
// if pointer is not NULL we will set energy to either
// the computed energy or 0 depending on need_raw_log_energy
if (log_energy_pre_window != NULL) {
float energy = 0.0f;
if (need_raw_log_energy) {
      // must sync before reusing temp_storage
if (remove_dc_offset) __syncthreads();
// use cub to reduce
wdot = BlockReduce(temp_storage).Sum(wdot);
energy = max(wdot, energy_floor);
}
if (thread_id == 0) {
log_energy_pre_window[row + lane * lde] = log(energy);
}
}
// TODO this could be more efficient using shared memory instead of
// tmp_window.
if (preemph_coeff != 0.0f) {
// wait for tmp_window to be computed
__threadfence();
__syncthreads();
// starting thread idx at 0 to keep writes aligned.
    // unaligned reads are less painful than unaligned writes
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float wval = tmp_window[idx];
float prev_window = wval;
if (idx > 0) {
prev_window = tmp_window[idx - 1];
}
// use __fmul_rn to match CPU
// window[idx] = (wval - preemph_coeff*prev_window) * windowing[idx];
window[idx] =
(wval - __fmul_rn(preemph_coeff, prev_window)) * windowing[idx];
}
}
}
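// Returns the index of the first sample of 'frame'. With snip_edges the frame
// starts at frame * frame_shift; otherwise the frame is centered on its
// midpoint, so the start may be negative for the first few frames.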
__host__ __device__ inline int32 FirstSampleOfFrame(int32 frame,
int32 frame_shift,
int32 window_size,
bool snip_edges) {
if (snip_edges) {
return frame * frame_shift;
} else {
int32 midpoint_of_frame = frame_shift * frame + frame_shift / 2,
beginning_of_frame = midpoint_of_frame - window_size / 2;
return beginning_of_frame;
}
}
// Mimics the functionality of extract_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
// Extra samples not processed in this chunk are moved to
// "stash" where they'll be pre-pended to the next chunk
// from this channel
__global__ void batched_extract_window_kernel(
const LaneDesc *lanes, int32_t num_lanes, int32 frame_shift,
int32 frame_length, int32 frame_length_padded, bool snip_edges,
const BaseFloat __restrict__ *wave, int32_t ldw,
BaseFloat *__restrict__ windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
// local frame number
int32_t fidx = blockIdx.x;
int32_t tidx = threadIdx.x;
int32_t lane = blockIdx.y;
const LaneDesc desc = lanes[lane];
ChannelId channel = desc.channel;
// This is the current sample that is pointed to by wave
int32_t current_sample = desc.current_sample;
// current frame we are computing in global space
int32_t current_frame = desc.current_frame;
// global frame number computed by this block
int32_t global_frame = current_frame + fidx;
int32_t num_chunk_samples = desc.num_chunk_samples;
if (fidx > desc.num_chunk_frames) return;
// offset input/output by channels or lanes
stash = stash + channel * lds;
wave = wave + lane * ldw;
BaseFloat *window = windows + fidx * wlda + gridDim.x * lane * wlda;
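  // gridDim.x equals max_chunk_frames (set by the launcher), so each lane's
  // block of windows is strided by gridDim.x * wlda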
// This is the first sample needed to compute this frame
int32_t start_sample =
FirstSampleOfFrame(global_frame, frame_shift, window_size, snip_edges);
// Sample offset is how much we have to offset our index
// into the input wave.
int32_t wave_start = start_sample - current_sample;
// wave_start and wave_end are start and end indexes into 'wave', for the
// piece of wave that we're trying to extract.
int32_t wave_end = wave_start + frame_length;
// wave_start will be negative on successive chunks as we need
// to grab context from stash.
if ((current_frame > 0 || wave_start >= 0) && wave_end <= num_chunk_samples) {
// the normal case-- no edge effects to consider.
for (int i = tidx; i < frame_length; i += blockDim.x) {
int32_t widx = wave_start + i;
BaseFloat val;
if (widx >= 0) {
val = wave[widx];
} else {
// widx is negative. Add it to the stash size
// to get the correct index into the stash
int32_t sidx = ssize + widx;
val = stash[sidx];
}
window[i] = val;
}
} else {
// Deal with any end effects by reflection, if needed. This code will only
// be reached for about two frames per utterance, so we don't concern
// ourselves excessively with efficiency.
for (int s = tidx; s < frame_length; s += blockDim.x) {
int32 s_in_wave = wave_start + s;
while (s_in_wave < 0 || s_in_wave >= num_chunk_samples) {
// reflect around the beginning or end of the wave.
// e.g. -1 -> 0, -2 -> 1.
// dim -> dim - 1, dim + 1 -> dim - 2.
// the code supports repeated reflections, although this
// would only be needed in pathological cases.
if (s_in_wave < 0)
s_in_wave = -s_in_wave - 1;
else
s_in_wave = 2 * num_chunk_samples - 1 - s_in_wave;
}
window[s] = wave[s_in_wave];
}
}
if (frame_length_padded > frame_length) {
for (int i = frame_length + tidx; i < frame_length_padded;
i += blockDim.x) {
window[i] = 0.0f;
}
}
}
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// This is the batched version. The y-dimension of the grid
// gives the lane number
__global__ void batched_dot_log_kernel(int32_t max_chunk_frames,
int32_t frame_length,
float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
  // Specialize BlockReduce for type float
typedef cub::BlockReduce<float, CU1DBLOCK> BlockReduce;
  // Allocate BlockReduce shared memory
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t frame = blockIdx.x;
int32_t tid = threadIdx.x;
int32_t lane = blockIdx.y;
float *in = signal_frame + frame * lds + max_chunk_frames * lane * lds;
float sum = 0;
  // perform local dot product
for (int32_t i = tid; i < frame_length; i += blockDim.x) {
float val = in[i];
sum += val * val;
}
// reduce using cub
sum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
signal_log_energy[frame + lane * lde] = logf(sum);
}
}
__global__ void batched_update_stash_kernel(const LaneDesc *lanes,
int32_t num_lanes,
const BaseFloat *wave, int32_t ldw,
BaseFloat *stash, int32_t num_stash,
int32_t lds) {
int32_t lane = blockIdx.x;
LaneDesc desc = lanes[lane];
int32_t channel = desc.channel;
int32_t num_chunk_samples = desc.num_chunk_samples;
// offset memory by lane or channel
wave = wave + lane * ldw;
stash = stash + channel * lds;
int32_t sample_offset = num_chunk_samples - num_stash;
for (int i = threadIdx.x; i < num_stash; i += blockDim.x) {
int32_t idx = sample_offset + i;
float val;
if (idx < 0) {
// data must come from old stash
val = stash[idx + num_stash];
} else {
// data comes from new wave
val = wave[idx];
}
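    // sync so every thread has read its (possibly old-stash) value before any
    // thread overwrites the stash below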
__syncthreads();
stash[i] = val;
}
}
// Each threadblock computes a different row of the matrix.
// Threads in the same block compute the row collaboratively.
// This kernel must be called out of place (A_in!=A_out).
__global__ void power_spectrum_kernel(int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo,
bool use_power) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
const float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
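    // bin 0 (DC) and the Nyquist bin are handled separately by thread 0 below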
float2 val = reinterpret_cast<const float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
if (use_power) {
Aw[idx] = ret;
} else {
Aw[idx] = sqrtf(ret);
}
}
// handle special case
if (threadIdx.x == 0) {
float real = Ar[0];
    // cufft puts this at the end; this differs from what kaldi does in its
    // own internal implementation
float im = Ar[row_length];
if (use_power) {
Aw[0] = real * real;
Aw[half_length] = im * im;
} else {
Aw[0] = fabs(real);
Aw[half_length] = fabs(im);
}
}
}
void cuda_power_spectrum(int32_t max_chunk_frames, int32_t num_lanes,
int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo, bool use_power) {
power_spectrum_kernel<<<max_chunk_frames * num_lanes, CU1DBLOCK>>>(
row_length, A_in, ldi, A_out, ldo, use_power);
}
void cuda_mel_banks_compute(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32_t num_bins,
float energy_floor, int32 *offsets, int32 *sizes,
float **vecs, const float *feats, int32_t ldf,
float *mels, int32_t ldm, bool use_log) {
dim3 Bl(32, 8);
dim3 Gr(num_bins, (max_chunk_frames + Bl.y - 1) / Bl.y, num_lanes);
batched_mel_banks_compute_kernel<<<Gr, Bl>>>(
lanes, num_lanes, max_chunk_frames, energy_floor, offsets, sizes, vecs,
feats, ldf, mels, ldm, use_log);
}
void cuda_apply_lifter_and_floor_energy(const LaneDesc *lanes,
int32_t num_lanes,
int32_t max_chunk_frames, int num_cols,
float cepstral_lifter, bool use_energy,
float energy_floor, float *log_energy,
int32_t ldl, float *lifter_coeffs,
float *features, int32_t ldf) {
dim3 Gr(max_chunk_frames, num_lanes);
batched_apply_lifter_and_floor_energy_kernel<<<Gr, CU1DBLOCK>>>(
lanes, num_lanes, max_chunk_frames, num_cols, cepstral_lifter, use_energy,
energy_floor, log_energy, ldl, lifter_coeffs, features, ldf);
}
void cuda_process_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int frame_length,
float dither, float energy_floor,
bool remove_dc_offset, float preemph_coeff,
bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing,
float *tmp_windows, int32_t ldt, float *windows,
int32_t ldw) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
batched_process_window_kernel<<<Gr, Bl>>>(
lanes, num_lanes, max_chunk_frames, frame_length, dither, energy_floor,
remove_dc_offset, preemph_coeff, need_raw_log_energy,
log_energy_pre_window, lde, windowing, tmp_windows, ldt, windows, ldw);
}
void cuda_extract_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32 frame_shift,
int32 frame_length, int32 frame_length_padded,
bool snip_edges, const float *wave, int32_t ldw,
float *windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
batched_extract_window_kernel<<<Gr, Bl>>>(
lanes, num_lanes, frame_shift, frame_length, frame_length_padded,
snip_edges, wave, ldw, windows, window_size, wlda, stash, ssize, lds);
}
void cuda_dot_log(int32_t max_chunk_frames, int32_t num_lanes,
int32_t frame_length, float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
dim3 Gr(max_chunk_frames, num_lanes);
batched_dot_log_kernel<<<Gr, CU1DBLOCK>>>(max_chunk_frames, frame_length,
signal_frame, lds,
signal_log_energy, lde);
}
void cuda_update_stash(const LaneDesc *lanes, int32_t num_lanes,
const BaseFloat *wave, int32_t ldw, BaseFloat *stash,
int32_t num_stash, int32_t lds) {
int Gr = num_lanes;
int Bl = 1024;
batched_update_stash_kernel<<<Gr, Bl>>>(lanes, num_lanes, wave, ldw, stash,
num_stash, lds);
}
} // namespace kaldi
|
6c0253266daccc73577528a07ae3e9107cc30bc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_kernel.cuh"
// ====================================================================
// create context, it contains all data that gpu needs to use
// ====================================================================
context* init_context()
{
hipSetDevice(1);
context* ctx;
hipMallocManaged((void**)&ctx, sizeof(context));
ctx->resolution[0] = DIM_X;
ctx->resolution[1] = DIM_Y;
ctx->resolution[2] = DIM_Z;
ctx->voxel_size = VOXEL_SIZE;
ctx->trunc_margin = 5 * ctx->voxel_size;
ctx->tsdf_threshold = TSDF_THRESHOLD;
ctx->weight_threshhold = WEIGHT_THRESHOLD;
int voxel_num = ctx->resolution[0] * ctx->resolution[1] * ctx->resolution[2];
hipMalloc((void**)&ctx->tsdf_voxel, voxel_num * sizeof(float));
hipMalloc((void**)&ctx->color_voxel, voxel_num * sizeof(uint8_t) * 3);
hipMalloc((void**)&ctx->in_buf_depth, CAM_NUM * WIDTH * HEIGHT * sizeof(uint8_t));
hipMalloc((void**)&ctx->in_buf_color, CAM_NUM * WIDTH * HEIGHT * sizeof(uint8_t) * 3);
hipMalloc((void**)&ctx->depth, CAM_NUM * WIDTH * HEIGHT * sizeof(float));
hipMalloc((void**)&ctx->pcd, 3 * WIDTH * HEIGHT * sizeof(float));
hipMemset(ctx->tsdf_voxel, 1, voxel_num * sizeof(float));
hipMemset(ctx->color_voxel, 0, voxel_num * sizeof(uint8_t) * 3);
HANDLE_ERROR();
return ctx;
}
// ====================================================================
// dequantization is necessary since depth has been quantized to 0-255
// ====================================================================
void dequantization(context* ctx, uint8_t *input_depth, float *output_depth)
{
int width = ctx->width;
int height = ctx->height;
dim3 blocks(width / 32, height / 24);
dim3 threads(32, 24);
hipMemset(output_depth, 0, width * height * sizeof(float));
hipLaunchKernelGGL(( dequantization_kernel), dim3(blocks), dim3(threads), 0, 0, ctx, input_depth, output_depth);
}
// ====================================================================
// core function: integrate a depth frame into the volume
// ====================================================================
void Integrate(context* ctx, uint8_t *in_buf_depth, uint8_t* in_buf_color)
{
#ifdef TimeEventRecord
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
#endif
hipMemcpy(ctx->in_buf_depth, in_buf_depth, CAM_NUM * WIDTH * HEIGHT * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(ctx->in_buf_color, in_buf_color, CAM_NUM * 3 * WIDTH * HEIGHT * sizeof(uint8_t), hipMemcpyHostToDevice);
for(int i = 0; i < CAM_NUM; i++) {
dequantization(ctx, ctx->in_buf_depth + WIDTH * HEIGHT * i, ctx->depth + WIDTH * HEIGHT * i);
HANDLE_ERROR();
}
dim3 blocks(DIM_Z / 32, DIM_Y / 32);
dim3 threads(32, 32);
hipLaunchKernelGGL(( integrate_kernel), dim3(blocks), dim3(threads), 0, 0, ctx);
HANDLE_ERROR();
#ifdef TimeEventRecord
hipEventRecord(end);
hipEventSynchronize(end);
float millisecond = 0;
hipEventElapsedTime(&millisecond, start, end);
printf("\t Integrate time = %f ms\n", millisecond);
#endif
}
void get_pcd_in_world(context* ctx, uint8_t *in_buf_depth, float *pcd, int cam_idx)
{
hipMemcpy(ctx->in_buf_depth, in_buf_depth, WIDTH * HEIGHT * sizeof(uint8_t), hipMemcpyHostToDevice);
dequantization(ctx, ctx->in_buf_depth, ctx->depth);
dim3 blocks(WIDTH / 32, HEIGHT / 24);
dim3 threads(32, 24);
hipLaunchKernelGGL(( depth_to_world_pcd), dim3(blocks), dim3(threads), 0, 0, ctx, cam_idx);
hipMemcpy(pcd, ctx->pcd, 3 * WIDTH * HEIGHT * sizeof(float), hipMemcpyDeviceToHost);
}
void memcpy_volume_to_cpu(context* ctx, float* tsdf_out, uint8_t* rgb_out)
{
int voxel_num = ctx->resolution[0] * ctx->resolution[1] * ctx->resolution[2];
hipMemcpy(tsdf_out, ctx->tsdf_voxel, voxel_num * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(rgb_out, ctx->color_voxel, voxel_num * sizeof(uint8_t) * 3, hipMemcpyDeviceToHost);
}
// ====================================================================
// release memory in GPU
// ====================================================================
void release_context(context* ctx)
{
hipFree(ctx->tsdf_voxel);
hipFree(ctx->in_buf_depth);
hipFree(ctx->in_buf_color);
hipFree(ctx->depth);
hipFree(ctx->pcd);
hipFree(ctx);
}
| 6c0253266daccc73577528a07ae3e9107cc30bc2.cu | #include "cuda_kernel.cuh"
// ====================================================================
// create context, it contains all data that gpu needs to use
// ====================================================================
context* init_context()
{
cudaSetDevice(1);
context* ctx;
cudaMallocManaged((void**)&ctx, sizeof(context));
ctx->resolution[0] = DIM_X;
ctx->resolution[1] = DIM_Y;
ctx->resolution[2] = DIM_Z;
ctx->voxel_size = VOXEL_SIZE;
ctx->trunc_margin = 5 * ctx->voxel_size;
ctx->tsdf_threshold = TSDF_THRESHOLD;
ctx->weight_threshhold = WEIGHT_THRESHOLD;
int voxel_num = ctx->resolution[0] * ctx->resolution[1] * ctx->resolution[2];
cudaMalloc((void**)&ctx->tsdf_voxel, voxel_num * sizeof(float));
cudaMalloc((void**)&ctx->color_voxel, voxel_num * sizeof(uint8_t) * 3);
cudaMalloc((void**)&ctx->in_buf_depth, CAM_NUM * WIDTH * HEIGHT * sizeof(uint8_t));
cudaMalloc((void**)&ctx->in_buf_color, CAM_NUM * WIDTH * HEIGHT * sizeof(uint8_t) * 3);
cudaMalloc((void**)&ctx->depth, CAM_NUM * WIDTH * HEIGHT * sizeof(float));
cudaMalloc((void**)&ctx->pcd, 3 * WIDTH * HEIGHT * sizeof(float));
cudaMemset(ctx->tsdf_voxel, 1, voxel_num * sizeof(float));
cudaMemset(ctx->color_voxel, 0, voxel_num * sizeof(uint8_t) * 3);
HANDLE_ERROR();
return ctx;
}
// ====================================================================
// dequantization is necessary since depth has been quantized to 0-255
// ====================================================================
void dequantization(context* ctx, uint8_t *input_depth, float *output_depth)
{
int width = ctx->width;
int height = ctx->height;
dim3 blocks(width / 32, height / 24);
dim3 threads(32, 24);
cudaMemset(output_depth, 0, width * height * sizeof(float));
dequantization_kernel<<<blocks, threads>>>(ctx, input_depth, output_depth);
}
// ====================================================================
// core function: integrate a depth frame into the volume
// ====================================================================
void Integrate(context* ctx, uint8_t *in_buf_depth, uint8_t* in_buf_color)
{
#ifdef TimeEventRecord
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
#endif
cudaMemcpy(ctx->in_buf_depth, in_buf_depth, CAM_NUM * WIDTH * HEIGHT * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(ctx->in_buf_color, in_buf_color, CAM_NUM * 3 * WIDTH * HEIGHT * sizeof(uint8_t), cudaMemcpyHostToDevice);
for(int i = 0; i < CAM_NUM; i++) {
dequantization(ctx, ctx->in_buf_depth + WIDTH * HEIGHT * i, ctx->depth + WIDTH * HEIGHT * i);
HANDLE_ERROR();
}
dim3 blocks(DIM_Z / 32, DIM_Y / 32);
dim3 threads(32, 32);
integrate_kernel<<<blocks, threads>>>(ctx);
HANDLE_ERROR();
#ifdef TimeEventRecord
cudaEventRecord(end);
cudaEventSynchronize(end);
float millisecond = 0;
cudaEventElapsedTime(&millisecond, start, end);
printf("\t Integrate time = %f ms\n", millisecond);
#endif
}
void get_pcd_in_world(context* ctx, uint8_t *in_buf_depth, float *pcd, int cam_idx)
{
cudaMemcpy(ctx->in_buf_depth, in_buf_depth, WIDTH * HEIGHT * sizeof(uint8_t), cudaMemcpyHostToDevice);
dequantization(ctx, ctx->in_buf_depth, ctx->depth);
dim3 blocks(WIDTH / 32, HEIGHT / 24);
dim3 threads(32, 24);
depth_to_world_pcd<<<blocks, threads>>>(ctx, cam_idx);
cudaMemcpy(pcd, ctx->pcd, 3 * WIDTH * HEIGHT * sizeof(float), cudaMemcpyDeviceToHost);
}
void memcpy_volume_to_cpu(context* ctx, float* tsdf_out, uint8_t* rgb_out)
{
int voxel_num = ctx->resolution[0] * ctx->resolution[1] * ctx->resolution[2];
cudaMemcpy(tsdf_out, ctx->tsdf_voxel, voxel_num * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(rgb_out, ctx->color_voxel, voxel_num * sizeof(uint8_t) * 3, cudaMemcpyDeviceToHost);
}
// ====================================================================
// release memory in GPU
// ====================================================================
void release_context(context* ctx)
{
cudaFree(ctx->tsdf_voxel);
cudaFree(ctx->in_buf_depth);
cudaFree(ctx->in_buf_color);
cudaFree(ctx->depth);
cudaFree(ctx->pcd);
cudaFree(ctx);
}
|
73f07511fe4fab1fdb6749f4f077a2f43f41b9c7.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
extern "C" void allocateMemory(int **arr, int arraySize)
{
hipMallocManaged(arr, ( (arraySize* sizeof(int))));
}
extern "C" void callCudaFree(int* local)
{
hipFree(local);
}
//extern void callMPI(int* local,int* arr,int arrSize,int mpi_size,int x_rank);
extern "C" void cudaInit( int myrank)
{
int cE;
int cudaDeviceCount = 1;
if( (cE = hipGetDeviceCount( &cudaDeviceCount)) != hipSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n",
cE, cudaDeviceCount );
exit(-1);
}
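    // map MPI ranks onto the available GPUs round-robin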
if( (cE = hipSetDevice( myrank % cudaDeviceCount )) != hipSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
}
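// One compare-exchange step of a bitonic sort distributed across MPI ranks:
// each thread owns one element of this rank's slice, finds its partner via
// global_idx ^ j, and keeps the smaller or larger value depending on the
// direction bit tested with sizeCompare. When the partner element belongs to
// another rank it is read through prev_local / next_local.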
__global__ void mergeKernel(int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local)
{
    //int *prev_local = NULL;
//int *next_local = NULL;
bool sameVal = false;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int global_idx = i + arrSize / mpi_size * mpi_rank;
int x = global_idx ^ j;
int x_rank = x / (arrSize / mpi_size);
if ( global_idx >= x ) {
if ( mpi_rank == x_rank ) {
if(sameVal == false)
{
sameVal = true;
}
}
else {
if ( prev_local == NULL ) {
//prev_local = calloc(arrSize / mpi_size, sizeof(int));
//allocateMemory(&prev_local,arrSize/mpi_size);
prev_local = arr + arrSize / mpi_size * x_rank;
//callMPI(prev_local,arr,arrSize,mpi_size,x_rank);
}
if ( (sizeCompare & x) == 0 && arr[i] < prev_local[i] ) {
arr[i] = prev_local[i];
}
if ( (sizeCompare & x) != 0 && arr[i] > prev_local[i] ) {
arr[i] = prev_local[i];
}
}
}
else {
if ( x_rank == mpi_rank ) {
int y = x - arrSize / mpi_size * mpi_rank;
if ( (global_idx & sizeCompare) == 0 && arr[i] > arr[y] ) {
int temp = arr[i];
arr[i] = arr[y];
arr[y] = temp;
}
if ( (global_idx & sizeCompare) != 0 && arr[i] < arr[y] ) {
int temp = arr[i];
arr[i] = arr[y];
arr[y] = temp;
}
}
else {
if ( next_local == NULL ) {
//next_local = calloc(arrSize / mpi_size, sizeof(int));
//allocateMemory(&next_local,arrSize/mpi_size);
next_local = arr + arrSize / mpi_size * x_rank;
//callMPI(next_local,arr,arrSize,mpi_size,x_rank);
}
if ( (global_idx & sizeCompare) == 0 && arr[i] > next_local[i] ) {
arr[i] = next_local[i];
}
if ( (global_idx & sizeCompare) != 0 && arr[i] < next_local[i] ) {
arr[i] = next_local[i];
}
}
}
}
extern "C" void mergeKernelLaunch(int blockSize,int threadsCount,int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local)
{
hipLaunchKernelGGL(( mergeKernel), dim3(blockSize),dim3(threadsCount), 0, 0, j, mpi_size, mpi_rank, arr, arrSize, sizeCompare, prev_local, next_local);
} | 73f07511fe4fab1fdb6749f4f077a2f43f41b9c7.cu | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" void allocateMemory(int **arr, int arraySize)
{
cudaMallocManaged(arr, ( (arraySize* sizeof(int))));
}
extern "C" void callCudaFree(int* local)
{
cudaFree(local);
}
//extern void callMPI(int* local,int* arr,int arrSize,int mpi_size,int x_rank);
extern "C" void cudaInit( int myrank)
{
int cE;
int cudaDeviceCount = 1;
if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n",
cE, cudaDeviceCount );
exit(-1);
}
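    // map MPI ranks onto the available GPUs round-robin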
if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
}
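// One compare-exchange step of a bitonic sort distributed across MPI ranks:
// each thread owns one element of this rank's slice, finds its partner via
// global_idx ^ j, and keeps the smaller or larger value depending on the
// direction bit tested with sizeCompare. When the partner element belongs to
// another rank it is read through prev_local / next_local.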
__global__ void mergeKernel(int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local)
{
    //int *prev_local = NULL;
//int *next_local = NULL;
bool sameVal = false;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int global_idx = i + arrSize / mpi_size * mpi_rank;
int x = global_idx ^ j;
int x_rank = x / (arrSize / mpi_size);
if ( global_idx >= x ) {
if ( mpi_rank == x_rank ) {
if(sameVal == false)
{
sameVal = true;
}
}
else {
if ( prev_local == NULL ) {
//prev_local = calloc(arrSize / mpi_size, sizeof(int));
//allocateMemory(&prev_local,arrSize/mpi_size);
prev_local = arr + arrSize / mpi_size * x_rank;
//callMPI(prev_local,arr,arrSize,mpi_size,x_rank);
}
if ( (sizeCompare & x) == 0 && arr[i] < prev_local[i] ) {
arr[i] = prev_local[i];
}
if ( (sizeCompare & x) != 0 && arr[i] > prev_local[i] ) {
arr[i] = prev_local[i];
}
}
}
else {
if ( x_rank == mpi_rank ) {
int y = x - arrSize / mpi_size * mpi_rank;
if ( (global_idx & sizeCompare) == 0 && arr[i] > arr[y] ) {
int temp = arr[i];
arr[i] = arr[y];
arr[y] = temp;
}
if ( (global_idx & sizeCompare) != 0 && arr[i] < arr[y] ) {
int temp = arr[i];
arr[i] = arr[y];
arr[y] = temp;
}
}
else {
if ( next_local == NULL ) {
//next_local = calloc(arrSize / mpi_size, sizeof(int));
//allocateMemory(&next_local,arrSize/mpi_size);
next_local = arr + arrSize / mpi_size * x_rank;
//callMPI(next_local,arr,arrSize,mpi_size,x_rank);
}
if ( (global_idx & sizeCompare) == 0 && arr[i] > next_local[i] ) {
arr[i] = next_local[i];
}
if ( (global_idx & sizeCompare) != 0 && arr[i] < next_local[i] ) {
arr[i] = next_local[i];
}
}
}
}
extern "C" void mergeKernelLaunch(int blockSize,int threadsCount,int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local)
{
mergeKernel<<<blockSize,threadsCount>>>(j, mpi_size, mpi_rank, arr, arrSize, sizeCompare, prev_local, next_local);
} |
768786c642720103d1e8ab354fe7f9423c13f9ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ __managed__ int ret[1000];
__global__ void AplusB(int a, int b) {
ret[threadIdx.x] = a + b + threadIdx.x;
}
int main() {
hipLaunchKernelGGL(( AplusB), dim3(1), dim3(1000) , 0, 0, 10, 100);
hipDeviceSynchronize();
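  // ret is in managed memory, so after the synchronization the host can read
  // the results directly without an explicit copy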
for(int i=0; i<1000; i++)
printf("%d: A+B = %d\n", i, ret[i]);
return 0;
}
| 768786c642720103d1e8ab354fe7f9423c13f9ed.cu | #include <stdio.h>
__device__ __managed__ int ret[1000];
__global__ void AplusB(int a, int b) {
ret[threadIdx.x] = a + b + threadIdx.x;
}
int main() {
AplusB<<< 1, 1000 >>>(10, 100);
cudaDeviceSynchronize();
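  // ret is in managed memory, so after the synchronization the host can read
  // the results directly without an explicit copy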
for(int i=0; i<1000; i++)
printf("%d: A+B = %d\n", i, ret[i]);
return 0;
}
|
80c8e988db524a7e495895b5448b127b68e5d110.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author: Danny George
High Performance Simulation Laboratory
Boise State University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. */
#include <iostream>
#include <GL/glew.h>
#include <GL/glfw.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <thrust/device_vector.h>
#include "WindData.cuh"
using namespace std;
int main(int argc, char const *argv[])
{
bool load_file = false;
const char * file_name;
if (argc > 1) {
file_name = argv[1];
load_file = true;
}
// initialize glfw
if (!glfwInit()) {
throw std::runtime_error("Couldn't initialize glfw");
}
// open a window and create its opengl context
if (!glfwOpenWindow(16, 16, 0, 0, 0, 0, 32, 0, GLFW_WINDOW)) {
fprintf(stderr, "Failed to open GLFW window\n");
glfwTerminate();
}
// initialize GLEW
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK) {
throw std::runtime_error("Couldn't initialize GLEW");
}
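    // select the device used for OpenGL interop; this is done after the GL
    // context above has been created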
hipGLSetGLDevice(0);
WindDataShape shape(4, 4, 4, 2);
WindDataThrustHost host(shape);
if (load_file) {
std::ifstream in;
in.open(file_name);
WindDataThrustASCIIConverter::fill_from_stream(host, in);
in.close();
}
//WindDataThrustDevice dev(host.shape);
WindDataTextureMemory tx(host.shape);
copy(host, tx);
WindDataThrustHost host2(host.shape);
copy(tx, host2);
WindDataThrustASCIIConverter::encode(host, std::cout);
cout << endl;
cout << endl;
cout << endl;
cout << "-----------------------" << endl;
cout << endl;
cout << endl;
cout << endl;
WindDataThrustASCIIConverter::encode(host2, std::cout);
return 0;
}
| 80c8e988db524a7e495895b5448b127b68e5d110.cu | /*
Author: Danny George
High Performance Simulation Laboratory
Boise State University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. */
#include <iostream>
#include <GL/glew.h>
#include <GL/glfw.h>
#include <cuda.h>
#include <cuda_gl_interop.h>
#include <thrust/device_vector.h>
#include "WindData.cuh"
using namespace std;
int main(int argc, char const *argv[])
{
bool load_file = false;
const char * file_name;
if (argc > 1) {
file_name = argv[1];
load_file = true;
}
// initialize glfw
if (!glfwInit()) {
throw std::runtime_error("Couldn't initialize glfw");
}
// open a window and create its opengl context
if (!glfwOpenWindow(16, 16, 0, 0, 0, 0, 32, 0, GLFW_WINDOW)) {
fprintf(stderr, "Failed to open GLFW window\n");
glfwTerminate();
}
// initialize GLEW
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK) {
throw std::runtime_error("Couldn't initialize GLEW");
}
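    // select the device used for OpenGL interop; this is done after the GL
    // context above has been created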
cudaGLSetGLDevice(0);
WindDataShape shape(4, 4, 4, 2);
WindDataThrustHost host(shape);
if (load_file) {
std::ifstream in;
in.open(file_name);
WindDataThrustASCIIConverter::fill_from_stream(host, in);
in.close();
}
//WindDataThrustDevice dev(host.shape);
WindDataTextureMemory tx(host.shape);
copy(host, tx);
WindDataThrustHost host2(host.shape);
copy(tx, host2);
WindDataThrustASCIIConverter::encode(host, std::cout);
cout << endl;
cout << endl;
cout << endl;
cout << "-----------------------" << endl;
cout << endl;
cout << endl;
cout << endl;
WindDataThrustASCIIConverter::encode(host2, std::cout);
return 0;
}
|
dd92188cc0a41b66e9470ef13d2c46fe9c1ccfa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NX 200
#define NY 100
__global__ void saxpy2D(float scalar, float * x, float * y)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if ( row < NX && col < NY ) // Make sure we don't do more work than we have data!
y[row*NY+col] = scalar * x[row*NY+col] + y[row*NY+col];
}
int main()
{
float *x, *y;
float maxError = 0;
int size = NX * NY * sizeof (float); // The total number of bytes per vector
hipError_t ierrAsync;
hipError_t ierrSync;
// Allocate memory
hipMallocManaged(&x, size);
hipMallocManaged(&y, size);
// Initialize memory
for( int i = 0; i < NX*NY; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// Get device properties
hipDeviceProp_t prop;
hipError_t ierr;
ierr = hipGetDeviceProperties(&prop, 0);
if (ierr != hipSuccess) { printf("Device property error: %s\n", hipGetErrorString(ierr)); }
printf("========== DEVICE PROPERTIES ==========\n");
printf("Device number: %d\n", 0);
printf(" Device name: %s\n", prop.name);
printf(" Compute capability: %d.%d\n", prop.major, prop.minor);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads in X-dimension of block: %d\n", prop.maxThreadsDim[0]);
printf(" Max threads in Y-dimension of block: %d\n", prop.maxThreadsDim[1]);
printf(" Max threads in Z-dimension of block: %d\n\n", prop.maxThreadsDim[2]);
dim3 threads_per_block (1024,16,1);
dim3 number_of_blocks ((NX/threads_per_block.x)+1,
(NY/threads_per_block.y)+1,
1);
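  // one extra block per dimension guarantees NX x NY is fully covered; threads
  // past the edge are masked by the bounds check in saxpy2D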
// Check total number of threads
if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) {
printf("Max number of threads exceeded!\n"); goto cleanup;
}
// Check number of threads_per_block
if (prop.maxThreadsDim[0] < threads_per_block.x && prop.maxThreadsDim[1] < threads_per_block.y) { printf("Block x- or y- sizes exceeded device limits!\n"); goto cleanup; }
hipLaunchKernelGGL(( saxpy2D) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, 2.0f, x, y );
ierrSync = hipGetLastError();
ierrAsync = hipDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != hipSuccess) { printf("Sync error: %s\n", hipGetErrorString(ierrSync)); }
if (ierrAsync != hipSuccess) { printf("Async error: %s\n", hipGetErrorString(ierrAsync)); }
// Print out our Max Error
for( int i = 0; i < NX*NY; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
cleanup:
// Free all our allocated memory
hipFree( x ); hipFree( y );
} | dd92188cc0a41b66e9470ef13d2c46fe9c1ccfa6.cu | #include <stdio.h>
#define NX 200
#define NY 100
__global__ void saxpy2D(float scalar, float * x, float * y)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if ( row < NX && col < NY ) // Make sure we don't do more work than we have data!
y[row*NY+col] = scalar * x[row*NY+col] + y[row*NY+col];
}
int main()
{
float *x, *y;
float maxError = 0;
int size = NX * NY * sizeof (float); // The total number of bytes per vector
cudaError_t ierrAsync;
cudaError_t ierrSync;
// Allocate memory
cudaMallocManaged(&x, size);
cudaMallocManaged(&y, size);
// Initialize memory
for( int i = 0; i < NX*NY; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// Get device properties
cudaDeviceProp prop;
cudaError_t ierr;
ierr = cudaGetDeviceProperties(&prop, 0);
if (ierr != cudaSuccess) { printf("Device property error: %s\n", cudaGetErrorString(ierr)); }
printf("========== DEVICE PROPERTIES ==========\n");
printf("Device number: %d\n", 0);
printf(" Device name: %s\n", prop.name);
printf(" Compute capability: %d.%d\n", prop.major, prop.minor);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads in X-dimension of block: %d\n", prop.maxThreadsDim[0]);
printf(" Max threads in Y-dimension of block: %d\n", prop.maxThreadsDim[1]);
printf(" Max threads in Z-dimension of block: %d\n\n", prop.maxThreadsDim[2]);
dim3 threads_per_block (1024,16,1);
dim3 number_of_blocks ((NX/threads_per_block.x)+1,
(NY/threads_per_block.y)+1,
1);
// Check total number of threads
if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) {
printf("Max number of threads exceeded!\n"); goto cleanup;
}
// Check number of threads_per_block
  if (prop.maxThreadsDim[0] < threads_per_block.x || prop.maxThreadsDim[1] < threads_per_block.y) { printf("Block x- or y- sizes exceeded device limits!\n"); goto cleanup; }
saxpy2D <<< number_of_blocks, threads_per_block >>> ( 2.0f, x, y );
ierrSync = cudaGetLastError();
ierrAsync = cudaDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != cudaSuccess) { printf("Sync error: %s\n", cudaGetErrorString(ierrSync)); }
if (ierrAsync != cudaSuccess) { printf("Async error: %s\n", cudaGetErrorString(ierrAsync)); }
// Print out our Max Error
for( int i = 0; i < NX*NY; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
cleanup:
// Free all our allocated memory
cudaFree( x ); cudaFree( y );
} |
87ce2746aacf513663c8ab1943e7304ad415ba39.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* louvain_main.cu
*
* @brief Simple test driver program for Louvain
*/
#include <gunrock/app/louvain/louvain_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return hipError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use float as the value type
hipError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT,
graph::HAS_EDGE_VALUES | graph::HAS_CSR>
GraphT;
hipError_t retval = hipSuccess;
util::CpuTimer cpu_timer;
GraphT graph; // graph we process on
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// GUARD_CU(graph.csr().Display());
VertexT *ref_communities = NULL;
bool quick = parameters.Get<bool>("quick");
// compute reference CPU Louvain solution
if (!quick) {
bool quiet = parameters.Get<bool>("quiet");
std::string validation = parameters.Get<std::string>("validation");
util::PrintMsg("Computing reference value ...", !quiet);
SizeT nodes = graph.nodes;
ref_communities = new VertexT[nodes];
// int num_runs = parameters.Get<int>("omp-runs");
// for (int i = 0; i < num_runs; i++)
{
int i = 0;
util::PrintMsg("__________________________", !quiet);
float elapsed = app::louvain::CPU_Reference(parameters, graph.csr(),
ref_communities);
util::PrintMsg("--------------------------\nRun " + std::to_string(i) +
" elapsed: " + std::to_string(elapsed) +
" ms, q = " +
std::to_string(app::louvain::Get_Modularity(
graph, ref_communities)),
!quiet);
}
}
std::vector<std::string> switches{"unify-segments", "advance-mode",
"omp-threads", "1st-th",
"neighborcomm-th"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[&ref_communities](util::Parameters ¶meters, GraphT &graph) {
bool quiet = parameters.Get<bool>("quiet");
// bool quick = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("omp-runs");
std::string validation = parameters.Get<std::string>("validation");
if (num_runs > 0) {
VertexT *omp_communities = new VertexT[graph.nodes];
for (int i = 0; i < num_runs; i++) {
util::PrintMsg("__________________________", !quiet);
float elapsed = app::louvain::OMP_Reference(
parameters, graph.csr(), omp_communities);
util::PrintMsg("--------------------------", !quiet);
if (validation == "each") {
util::PrintMsg("Run " + std::to_string(i) + " elapsed: " +
std::to_string(elapsed) + " ms",
!quiet);
app::louvain::Validate_Results(
parameters, graph, omp_communities, ref_communities);
} else {
util::PrintMsg("Run " + std::to_string(i) + " elapsed: " +
std::to_string(elapsed) + " ms, q = " +
std::to_string(app::louvain::Get_Modularity(
graph, omp_communities)),
!quiet);
}
}
if (validation == "last")
app::louvain::Validate_Results(parameters, graph, omp_communities,
ref_communities);
if (ref_communities == NULL)
ref_communities = omp_communities;
else {
delete[] omp_communities;
omp_communities = NULL;
}
}
return app::louvain::RunTests(parameters, graph, ref_communities);
}));
if (ref_communities != NULL) {
delete[] ref_communities;
ref_communities = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
hipError_t retval = hipSuccess;
util::Parameters parameters("test Louvain (community detection)");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::louvain::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return hipSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F64B>(parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 87ce2746aacf513663c8ab1943e7304ad415ba39.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* louvain_main.cu
*
* @brief Simple test driver program for Louvain
*/
#include <gunrock/app/louvain/louvain_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return cudaError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use float as the value type
cudaError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT,
graph::HAS_EDGE_VALUES | graph::HAS_CSR>
GraphT;
cudaError_t retval = cudaSuccess;
util::CpuTimer cpu_timer;
GraphT graph; // graph we process on
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// GUARD_CU(graph.csr().Display());
VertexT *ref_communities = NULL;
bool quick = parameters.Get<bool>("quick");
// compute reference CPU Louvain solution
if (!quick) {
bool quiet = parameters.Get<bool>("quiet");
std::string validation = parameters.Get<std::string>("validation");
util::PrintMsg("Computing reference value ...", !quiet);
SizeT nodes = graph.nodes;
ref_communities = new VertexT[nodes];
// int num_runs = parameters.Get<int>("omp-runs");
// for (int i = 0; i < num_runs; i++)
{
int i = 0;
util::PrintMsg("__________________________", !quiet);
float elapsed = app::louvain::CPU_Reference(parameters, graph.csr(),
ref_communities);
util::PrintMsg("--------------------------\nRun " + std::to_string(i) +
" elapsed: " + std::to_string(elapsed) +
" ms, q = " +
std::to_string(app::louvain::Get_Modularity(
graph, ref_communities)),
!quiet);
}
}
std::vector<std::string> switches{"unify-segments", "advance-mode",
"omp-threads", "1st-th",
"neighborcomm-th"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[&ref_communities](util::Parameters ¶meters, GraphT &graph) {
bool quiet = parameters.Get<bool>("quiet");
// bool quick = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("omp-runs");
std::string validation = parameters.Get<std::string>("validation");
if (num_runs > 0) {
VertexT *omp_communities = new VertexT[graph.nodes];
for (int i = 0; i < num_runs; i++) {
util::PrintMsg("__________________________", !quiet);
float elapsed = app::louvain::OMP_Reference(
parameters, graph.csr(), omp_communities);
util::PrintMsg("--------------------------", !quiet);
if (validation == "each") {
util::PrintMsg("Run " + std::to_string(i) + " elapsed: " +
std::to_string(elapsed) + " ms",
!quiet);
app::louvain::Validate_Results(
parameters, graph, omp_communities, ref_communities);
} else {
util::PrintMsg("Run " + std::to_string(i) + " elapsed: " +
std::to_string(elapsed) + " ms, q = " +
std::to_string(app::louvain::Get_Modularity(
graph, omp_communities)),
!quiet);
}
}
if (validation == "last")
app::louvain::Validate_Results(parameters, graph, omp_communities,
ref_communities);
if (ref_communities == NULL)
ref_communities = omp_communities;
else {
delete[] omp_communities;
omp_communities = NULL;
}
}
return app::louvain::RunTests(parameters, graph, ref_communities);
}));
if (ref_communities != NULL) {
delete[] ref_communities;
ref_communities = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
cudaError_t retval = cudaSuccess;
util::Parameters parameters("test Louvain (community detection)");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::louvain::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return cudaSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F64B>(parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
BatchNorm.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if USE_ROCM
#include "cuda/Common.h"
#include "layer/BatchNorm.h"
namespace nn {
namespace layer {
__global__ void BatchNormForwardImpl(const int N, const int neurons, const float* X, const float* mean, const float* sigma,
const float* gamma, const float* beta, float* xhat, float* Y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
if(sigma[i] > 0.001f) {
const float factor = 1.0f / sigma[i];
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
xhat[offset] = (X[offset] - mean[i]) * factor;
Y[offset] = gamma[i] * xhat[offset] + beta[i];
}
} else {
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
xhat[offset] = 0;
Y[offset] = beta[i];
}
}
}
}
__global__ void BatchNormBackpropImpl(const int N, const int neurons, const float* xhat, const float* mean, const float* sigma,
float* gamma, float* beta, const float* dFdY, float* dFdX, float learning_rate) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
float dFdGamma = 0.0f, dFdBeta = 0.0f;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
dFdGamma += dFdY[offset] * xhat[offset];
dFdBeta += dFdY[offset];
}
const float factor = gamma[i] * sigma[i] / N;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
dFdX[offset] = factor * (N * dFdY[offset] - dFdGamma * xhat[offset] - dFdBeta);
}
const float delta = learning_rate / N;
gamma[i] -= delta * dFdGamma;
beta[i] -= delta * dFdBeta;
}
}
__global__ void CalculateMeanImpl(const int N, const int neurons, const float* X, float* mean) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
float m = 0.0f;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
m += X[offset];
}
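        // Blend this batch's mean into the running mean as an exponential moving average (0.99 old / 0.01 new).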
mean[i] = 0.99 * mean[i] + 0.01 * (m / N);
}
}
__global__ void CalculateStdDevImpl(const int N, const int neurons, const float* X, const float* mean, float* sigma2, float* sigma) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
float value = 0.0f;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
const auto x_zero_mean = X[offset] - mean[i];
value += x_zero_mean * x_zero_mean;
}
sigma2[i] = 0.99 * sigma2[i] + 0.01 * value / N;
sigma[i] = sqrt(sigma2[i] + 0.1f);
}
}
template <>
void BatchNorm<Cuda>::Forward(const Matrix<Cuda>& X, bool freeze) {
const auto& shape = X.GetShape();
if((this->_Y.GetShape().cols != shape.cols)) {
throw Exception("BatchNorm forward: wrong matrix shape");
}
const auto& N = shape.rows;
const auto& neurons = shape.cols;
this->_Y.Reshape(shape);
this->_dFdX.Reshape(shape);
this->_xhat.Reshape(shape);
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((neurons + block_size.x - 1) / block_size.x);
if(!freeze) {
hipLaunchKernelGGL(( CalculateMeanImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, neurons, X.DeviceData(), this->_mean.DeviceData());
Exception::ThrowOnError("BatchNorm: cannot calculate Mean values");
hipLaunchKernelGGL(( CalculateStdDevImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, neurons, X.DeviceData(), this->_mean.DeviceData(), this->_sigma2.DeviceData(), this->_sigma.DeviceData());
Exception::ThrowOnError("BatchNorm: cannot calculate StdDev values");
}
hipLaunchKernelGGL(( BatchNormForwardImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, neurons, X.DeviceData(), this->_mean.DeviceData(), this->_sigma.DeviceData(),
this->_gamma.DeviceData(), this->_beta.DeviceData(), this->_xhat.DeviceData(), this->_Y.DeviceData());
Exception::ThrowOnError("BatchNorm: cannot perform forward propagation");
}
template <>
void BatchNorm<Cuda>::Backprop(const Matrix<Cuda>& X, const Matrix<Cuda>& dFdY, float learning_rate) {
const auto& shape = X.GetShape();
if((shape.cols != dFdY.GetShape().cols) || (shape.rows != dFdY.GetShape().rows) ||
(shape.cols != this->_Y.GetShape().cols) || (shape.rows > this->_Y.GetShape().rows)) {
throw Exception("BatchNorm backprop: wrong matrix shape");
}
const auto& N = shape.rows;
const auto& neurons = shape.cols;
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((neurons + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( BatchNormBackpropImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, neurons, this->_xhat.DeviceData(), this->_mean.DeviceData(), this->_sigma.DeviceData(),
this->_gamma.DeviceData(), this->_beta.DeviceData(), dFdY.DeviceData(), this->_dFdX.DeviceData(), learning_rate);
Exception::ThrowOnError("BatchNorm: cannot perform back propagation");
}
} //namespace layer
} //namespace nn
#endif //USE_ROCM
| BatchNorm.cu | #if USE_CUDA
#include "cuda/Common.h"
#include "layer/BatchNorm.h"
namespace nn {
namespace layer {
__global__ void BatchNormForwardImpl(const int N, const int neurons, const float* X, const float* mean, const float* sigma,
const float* gamma, const float* beta, float* xhat, float* Y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
if(sigma[i] > 0.001f) {
const float factor = 1.0f / sigma[i];
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
xhat[offset] = (X[offset] - mean[i]) * factor;
Y[offset] = gamma[i] * xhat[offset] + beta[i];
}
} else {
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
xhat[offset] = 0;
Y[offset] = beta[i];
}
}
}
}
__global__ void BatchNormBackpropImpl(const int N, const int neurons, const float* xhat, const float* mean, const float* sigma,
float* gamma, float* beta, const float* dFdY, float* dFdX, float learning_rate) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
float dFdGamma = 0.0f, dFdBeta = 0.0f;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
dFdGamma += dFdY[offset] * xhat[offset];
dFdBeta += dFdY[offset];
}
const float factor = gamma[i] * sigma[i] / N;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
dFdX[offset] = factor * (N * dFdY[offset] - dFdGamma * xhat[offset] - dFdBeta);
}
const float delta = learning_rate / N;
gamma[i] -= delta * dFdGamma;
beta[i] -= delta * dFdBeta;
}
}
__global__ void CalculateMeanImpl(const int N, const int neurons, const float* X, float* mean) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
float m = 0.0f;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
m += X[offset];
}
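        // Blend this batch's mean into the running mean as an exponential moving average (0.99 old / 0.01 new).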
mean[i] = 0.99 * mean[i] + 0.01 * (m / N);
}
}
__global__ void CalculateStdDevImpl(const int N, const int neurons, const float* X, const float* mean, float* sigma2, float* sigma) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < neurons) {
float value = 0.0f;
for(int n = 0, offset = i; n < N; ++n, offset += neurons) {
const auto x_zero_mean = X[offset] - mean[i];
value += x_zero_mean * x_zero_mean;
}
sigma2[i] = 0.99 * sigma2[i] + 0.01 * value / N;
sigma[i] = sqrt(sigma2[i] + 0.1f);
}
}
template <>
void BatchNorm<Cuda>::Forward(const Matrix<Cuda>& X, bool freeze) {
const auto& shape = X.GetShape();
if((this->_Y.GetShape().cols != shape.cols)) {
throw Exception("BatchNorm forward: wrong matrix shape");
}
const auto& N = shape.rows;
const auto& neurons = shape.cols;
this->_Y.Reshape(shape);
this->_dFdX.Reshape(shape);
this->_xhat.Reshape(shape);
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((neurons + block_size.x - 1) / block_size.x);
if(!freeze) {
CalculateMeanImpl<<<num_of_blocks, block_size>>>
(N, neurons, X.DeviceData(), this->_mean.DeviceData());
Exception::ThrowOnError("BatchNorm: cannot calculate Mean values");
CalculateStdDevImpl<<<num_of_blocks, block_size>>>
(N, neurons, X.DeviceData(), this->_mean.DeviceData(), this->_sigma2.DeviceData(), this->_sigma.DeviceData());
Exception::ThrowOnError("BatchNorm: cannot calculate StdDev values");
}
BatchNormForwardImpl<<<num_of_blocks, block_size>>>
(N, neurons, X.DeviceData(), this->_mean.DeviceData(), this->_sigma.DeviceData(),
this->_gamma.DeviceData(), this->_beta.DeviceData(), this->_xhat.DeviceData(), this->_Y.DeviceData());
Exception::ThrowOnError("BatchNorm: cannot perform forward propagation");
}
template <>
void BatchNorm<Cuda>::Backprop(const Matrix<Cuda>& X, const Matrix<Cuda>& dFdY, float learning_rate) {
const auto& shape = X.GetShape();
if((shape.cols != dFdY.GetShape().cols) || (shape.rows != dFdY.GetShape().rows) ||
(shape.cols != this->_Y.GetShape().cols) || (shape.rows > this->_Y.GetShape().rows)) {
throw Exception("BatchNorm backprop: wrong matrix shape");
}
const auto& N = shape.rows;
const auto& neurons = shape.cols;
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((neurons + block_size.x - 1) / block_size.x);
BatchNormBackpropImpl<<<num_of_blocks, block_size>>>
(N, neurons, this->_xhat.DeviceData(), this->_mean.DeviceData(), this->_sigma.DeviceData(),
this->_gamma.DeviceData(), this->_beta.DeviceData(), dFdY.DeviceData(), this->_dFdX.DeviceData(), learning_rate);
Exception::ThrowOnError("BatchNorm: cannot perform back propagation");
}
} //namespace layer
} //namespace nn
#endif //USE_CUDA
|
26a4f3273e3de633615943cb6c0715704026300c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:NOT_ALL_VERIFIED
//--blockDim=2048 --gridDim=64
//possible write-write race on q.p\[4\]\[0\]
struct s {
float *p[42];
};
__global__ void foo(s q) {
__requires_fresh_array(q.p[4]);
q.p[4][0] = threadIdx.x;
}
| 26a4f3273e3de633615943cb6c0715704026300c.cu | //xfail:NOT_ALL_VERIFIED
//--blockDim=2048 --gridDim=64
//possible write-write race on q.p\[4\]\[0\]
struct s {
float *p[42];
};
__global__ void foo(s q) {
__requires_fresh_array(q.p[4]);
q.p[4][0] = threadIdx.x;
}
|
b094ccb077212e90d20a7ee03e438dde05295504.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
#include <torch/extension.h>
#include <vector>
#include <string>
#include <map>
#if defined(__HIPCC__)
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#elif defined(__HIPCC__)
#include <hip/hip_runtime_api.h>
#define hipModule_t hipModule_t
#define hipFunction_t hipFunction_t
#define hipModuleLoad hipModuleLoad
#define hipModuleUnload hipModuleUnload
#define hipModuleGetFunction hipModuleGetFunction
#define hipDeviceGetAttribute hipDeviceGetAttribute
#define hipDeviceAttributeComputeCapabilityMajor hipDeviceAttributeComputeCapabilityMajor
#define hipDeviceAttributeComputeCapabilityMinor hipDeviceAttributeComputeCapabilityMinor
#define hipModuleLaunchKernel(f, bx, by, bz, tx, ty, tz, shm, stream, args, extra) \
hipModuleLaunchKernel(f, bx, by, bz, tx, ty, tz, shm, stream, args, extra)
#define hipSuccess hipSuccess
#define hipSetDevice hipSetDevice
#define hipHostMalloc hipHostMalloc
#define hipHostFree hipHostFree
#define hipStream_t hipStream_t
#define hipMemcpyAsync hipMemcpyAsync
#define hipMemcpyHostToDevice hipMemcpyHostToDevice
#define hipStreamSynchronize hipStreamSynchronize
#define hipEvent_t hipEvent_t
#define hipEventCreateWithFlags hipEventCreateWithFlags
#define hipEventRecord hipEventRecord
#define hipEventQuery hipEventQuery
#define hipEventDestroy hipEventDestroy
#define hipErrorNotReady hipErrorNotReady
#define hipEventDisableTiming 0
#endif
static std::map<std::string, std::pair<hipModule_t, hipFunction_t>> module_manager;
std::vector<torch::Tensor> custom_op_forward(std::vector<torch::Tensor> inputs,
const std::string& source,
const std::string& source_path,
const std::string& hash,
const std::vector<std::string>& meta_inputs,
const std::vector<std::string>& meta_outputs)
{
hipModule_t hmod = nullptr;
hipFunction_t hfunc = nullptr;
auto it = module_manager.find(hash);
if (it == module_manager.end())
{
std::string kernel_src_path = source_path, kernel_path = source_path + ".out";
FILE *fp = fopen(kernel_src_path.c_str(), "wb");
CHECK_EQ(source.size(), fwrite(source.c_str(), 1, source.size(), fp));
fclose(fp);
int major, minor;
CHECK_EQ(hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, 0), 0);
CHECK_EQ(hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, 0), 0);
#ifndef __HIP_PLATFORM_HCC__
std::string arch = std::to_string(major * 10 + minor);
std::string compile_cmd = "/usr/local/cuda/bin/nvcc " + kernel_src_path + " -gencode arch=compute_" + arch + ",code=sm_" + arch + " --fatbin -O2 -o " + kernel_path;
#else
std::string arch = std::to_string(major * 100 + minor);
std::string compile_cmd = "/opt/rocm/bin/hipcc " + kernel_src_path + " --amdgpu-target=gfx" + arch + " --genco -Wno-ignored-attributes -O2 -o " + kernel_path;
#endif
LOG(INFO) << "MainOpKernel is compiling dynamtic kernel (arch=" << arch << "): " << kernel_path;
CHECK_EQ(system(compile_cmd.c_str()), 0);
CHECK_EQ(hipModuleLoad(&hmod, kernel_path.c_str()), 0);
CHECK_EQ(hipModuleGetFunction(&hfunc, hmod, "template_op_kernel0"), 0);
module_manager[hash] = {hmod, hfunc};
}
else
hmod = it->second.first, hfunc = it->second.second;
int bx, by, bz, tx, ty, tz;
int i, pos, next;
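  // Parse the kernel launch dimensions from the "// [thread_extent]" annotations
  // embedded in the generated source; any missing annotation defaults to 1.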
pos = source.find("// [thread_extent] blockIdx.x"), next = source.find("= ", pos + 1), bx = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] blockIdx.y"), next = source.find("= ", pos + 1), by = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] blockIdx.z"), next = source.find("= ", pos + 1), bz = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] threadIdx.x"), next = source.find("= ", pos + 1), tx = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] threadIdx.y"), next = source.find("= ", pos + 1), ty = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] threadIdx.z"), next = source.find("= ", pos + 1), tz = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
std::vector<void*> args, p_args;
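  // Map each kernel parameter in the signature (names like inputN / outputN) to its
  // slot in the flat args array; p_args holds the per-parameter pointers handed to the launch.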
pos = source.find(") {\n"), next = source.rfind('(', pos) + 1;
CHECK_EQ(true, (pos > 0 && next > 0));
auto code_args = source.substr(next, pos - next) + ",";
args.resize(meta_inputs.size() + meta_outputs.size()), p_args.resize(args.size());
for (i = pos = 0; next = code_args.find(',', pos), next >= 0; pos = next + 1, ++i) {
int at = code_args.rfind(' ', next) + 1;
auto arg_name = code_args.substr(at, next - at);
CHECK_NE(arg_name, "");
if (arg_name[0] == 'i')
p_args[i] = &args[std::atoi(arg_name.c_str() + 5)];
else
p_args[i] = &args[meta_inputs.size() + std::atoi(arg_name.c_str() + 6)];
}
std::vector<std::vector<int64_t>> output_shapes;
output_shapes.clear();
for (int y = 0; y < meta_outputs.size(); ++y) {
auto meta_shape = meta_outputs[y].substr(0, meta_outputs[y].find('/')) + "-";
std::vector<int64_t> shape_builder;
for (int i = 0, j = 1; j < meta_shape.size(); ++j) {
if (meta_shape[j] == '-')
shape_builder.push_back(std::atoi(meta_shape.c_str() + i)), i = j + 1;
}
output_shapes.push_back(std::move(shape_builder));
}
std::vector<torch::Tensor> outputs;
outputs.resize(meta_outputs.size());
auto options =
torch::TensorOptions()
.dtype(inputs[0].dtype())
.device(inputs[0].device().type(), inputs[0].device().index())
.layout(torch::kStrided)
.requires_grad(true);
for (int i = 0; i < outputs.size(); ++i) {
outputs[i] = torch::zeros(output_shapes[i], options);
}
for (int i = 0; i < inputs.size(); ++i)
{
args[i] = (void*)inputs[i].data_ptr();
}
for (int i = 0; i < meta_outputs.size(); ++i)
{
args[meta_inputs.size() + i] = (void*)outputs[i].data_ptr();
}
CHECK_EQ(hipModuleLaunchKernel(hfunc, bx, by, bz, tx, ty, tz, 0, 0, p_args.data(), NULL), 0);
return outputs;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &custom_op_forward, "custom forward (GPU)");
}
| b094ccb077212e90d20a7ee03e438dde05295504.cu | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
#include <torch/extension.h>
#include <vector>
#include <string>
#include <map>
#if defined(__CUDACC__)
#include <cuda.h>
#include <cuda_runtime_api.h>
#elif defined(__HIPCC__)
#include <hip/hip_runtime_api.h>
#define CUmodule hipModule_t
#define CUfunction hipFunction_t
#define cuModuleLoad hipModuleLoad
#define cuModuleUnload hipModuleUnload
#define cuModuleGetFunction hipModuleGetFunction
#define cuDeviceGetAttribute hipDeviceGetAttribute
#define CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR hipDeviceAttributeComputeCapabilityMajor
#define CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR hipDeviceAttributeComputeCapabilityMinor
#define cuLaunchKernel(f, bx, by, bz, tx, ty, tz, shm, stream, args, extra) \
hipModuleLaunchKernel(f, bx, by, bz, tx, ty, tz, shm, stream, args, extra)
#define cudaSuccess hipSuccess
#define cudaSetDevice hipSetDevice
#define cudaMallocHost hipHostMalloc
#define cudaFreeHost hipHostFree
#define cudaStream_t hipStream_t
#define cudaMemcpyAsync hipMemcpyAsync
#define cudaMemcpyHostToDevice hipMemcpyHostToDevice
#define cudaStreamSynchronize hipStreamSynchronize
#define cudaEvent_t hipEvent_t
#define cudaEventCreateWithFlags hipEventCreateWithFlags
#define cudaEventRecord hipEventRecord
#define cudaEventQuery hipEventQuery
#define cudaEventDestroy hipEventDestroy
#define cudaErrorNotReady hipErrorNotReady
#define cudaEventDisableTiming 0
#endif
static std::map<std::string, std::pair<CUmodule, CUfunction>> module_manager;
std::vector<torch::Tensor> custom_op_forward(std::vector<torch::Tensor> inputs,
const std::string& source,
const std::string& source_path,
const std::string& hash,
const std::vector<std::string>& meta_inputs,
const std::vector<std::string>& meta_outputs)
{
CUmodule hmod = nullptr;
CUfunction hfunc = nullptr;
auto it = module_manager.find(hash);
if (it == module_manager.end())
{
std::string kernel_src_path = source_path, kernel_path = source_path + ".out";
FILE *fp = fopen(kernel_src_path.c_str(), "wb");
CHECK_EQ(source.size(), fwrite(source.c_str(), 1, source.size(), fp));
fclose(fp);
int major, minor;
CHECK_EQ(cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, 0), 0);
CHECK_EQ(cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, 0), 0);
#ifndef __HIP_PLATFORM_HCC__
std::string arch = std::to_string(major * 10 + minor);
std::string compile_cmd = "/usr/local/cuda/bin/nvcc " + kernel_src_path + " -gencode arch=compute_" + arch + ",code=sm_" + arch + " --fatbin -O2 -o " + kernel_path;
#else
std::string arch = std::to_string(major * 100 + minor);
std::string compile_cmd = "/opt/rocm/bin/hipcc " + kernel_src_path + " --amdgpu-target=gfx" + arch + " --genco -Wno-ignored-attributes -O2 -o " + kernel_path;
#endif
LOG(INFO) << "MainOpKernel is compiling dynamtic kernel (arch=" << arch << "): " << kernel_path;
CHECK_EQ(system(compile_cmd.c_str()), 0);
CHECK_EQ(cuModuleLoad(&hmod, kernel_path.c_str()), 0);
CHECK_EQ(cuModuleGetFunction(&hfunc, hmod, "template_op_kernel0"), 0);
module_manager[hash] = {hmod, hfunc};
}
else
hmod = it->second.first, hfunc = it->second.second;
int bx, by, bz, tx, ty, tz;
int i, pos, next;
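  // Parse the kernel launch dimensions from the "// [thread_extent]" annotations
  // embedded in the generated source; any missing annotation defaults to 1.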
pos = source.find("// [thread_extent] blockIdx.x"), next = source.find("= ", pos + 1), bx = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] blockIdx.y"), next = source.find("= ", pos + 1), by = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] blockIdx.z"), next = source.find("= ", pos + 1), bz = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] threadIdx.x"), next = source.find("= ", pos + 1), tx = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] threadIdx.y"), next = source.find("= ", pos + 1), ty = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
pos = source.find("// [thread_extent] threadIdx.z"), next = source.find("= ", pos + 1), tz = (pos >= 0 && next >= 0) ? std::atoi(source.c_str() + next + 2) : 1;
std::vector<void*> args, p_args;
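  // Map each kernel parameter in the signature (names like inputN / outputN) to its
  // slot in the flat args array; p_args holds the per-parameter pointers handed to the launch.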
pos = source.find(") {\n"), next = source.rfind('(', pos) + 1;
CHECK_EQ(true, (pos > 0 && next > 0));
auto code_args = source.substr(next, pos - next) + ",";
args.resize(meta_inputs.size() + meta_outputs.size()), p_args.resize(args.size());
for (i = pos = 0; next = code_args.find(',', pos), next >= 0; pos = next + 1, ++i) {
int at = code_args.rfind(' ', next) + 1;
auto arg_name = code_args.substr(at, next - at);
CHECK_NE(arg_name, "");
if (arg_name[0] == 'i')
p_args[i] = &args[std::atoi(arg_name.c_str() + 5)];
else
p_args[i] = &args[meta_inputs.size() + std::atoi(arg_name.c_str() + 6)];
}
std::vector<std::vector<int64_t>> output_shapes;
output_shapes.clear();
for (int y = 0; y < meta_outputs.size(); ++y) {
auto meta_shape = meta_outputs[y].substr(0, meta_outputs[y].find('/')) + "-";
std::vector<int64_t> shape_builder;
for (int i = 0, j = 1; j < meta_shape.size(); ++j) {
if (meta_shape[j] == '-')
shape_builder.push_back(std::atoi(meta_shape.c_str() + i)), i = j + 1;
}
output_shapes.push_back(std::move(shape_builder));
}
std::vector<torch::Tensor> outputs;
outputs.resize(meta_outputs.size());
auto options =
torch::TensorOptions()
.dtype(inputs[0].dtype())
.device(inputs[0].device().type(), inputs[0].device().index())
.layout(torch::kStrided)
.requires_grad(true);
for (int i = 0; i < outputs.size(); ++i) {
outputs[i] = torch::zeros(output_shapes[i], options);
}
for (int i = 0; i < inputs.size(); ++i)
{
args[i] = (void*)inputs[i].data_ptr();
}
for (int i = 0; i < meta_outputs.size(); ++i)
{
args[meta_inputs.size() + i] = (void*)outputs[i].data_ptr();
}
CHECK_EQ(cuLaunchKernel(hfunc, bx, by, bz, tx, ty, tz, 0, 0, p_args.data(), NULL), 0);
return outputs;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &custom_op_forward, "custom forward (GPU)");
}
|
29984ca10ef9f607762148428a107784a584280b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cupp/deviceT/vector.h"
#include "cupp/common.h"
#include "OpenSteer/deviceT/Vec3.h"
#include "OpenSteer/CuPPConfig.h"
#include "OpenSteer/kernels.h"
using OpenSteer::deviceT::Vec3;
#if 0
// called V3
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base < number_of_boids; base+=threads_per_block) {
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
int i=0;
while (i < threads_per_block) {
const Vec3 offset = position - s_positions[i];
float const d2 = offset.lengthSquared();
const int cur_index = base + i;
if (d2 < r2 && cur_index != my_index) {
if (neighbours_found < neighbour_size_max) {
result[neighbours_found] = cur_index;
++neighbours_found;
} else {
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
for ( int j = 0; j < neighbour_size_max; ++j ) {
float const dist = ( position - positions[ result[j] ] ).lengthSquared();
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
if (max_neighbour_distance>d2) {
result[max_neighbour_distance_index] = cur_index;
}
}
}
++i;
}
__syncthreads();
}
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#endif
#if 0
// called V2
// the one with the strange behavior (fast on second run)
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
	// using shared memory here reduces the number of registers needed, which results
// in the possibility of more blocks per multiprocessor and a higher occupancy of each multiprocessor
__shared__ int number_of_boids;
number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
float result_distance[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
int max_neighbour_distance_index = 0;
float max_neighbour_distance = 0.0f;
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base<number_of_boids; base+=threads_per_block) {
// read positions from global to shared memory
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
for (int i=0; i<threads_per_block; ++i) {
const float d2 = (position - s_positions[i]).lengthSquared();
// fill variables with dummy values we can write in every cycle
int cur_index = result[0];
int result_index = 0;
float cur_neighbour_distance = result_distance[0];
if (d2 < r2 && (base + i != my_index) ) {
if (neighbours_found < neighbour_size_max) {
cur_neighbour_distance = d2;
cur_index = base + i;
result_index = neighbours_found;
if (max_neighbour_distance<d2) {
max_neighbour_distance = d2;
}
++neighbours_found;
} else {
if (d2 < max_neighbour_distance) {
cur_neighbour_distance = d2;
cur_index = base + i;
result_index = max_neighbour_distance_index;
}
}
}
// write result
result [result_index] = cur_index;
result_distance[result_index] = cur_neighbour_distance;
// update max_neighbour_distance & index
if (d2 < max_neighbour_distance) {
for (int j=0; j<neighbour_size_max; ++j) {
if (result_distance[j] > max_neighbour_distance) {
max_neighbour_distance = result_distance[j];
max_neighbour_distance_index = j;
}
}
}
}
__syncthreads();
}
// write the result to global memory
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#endif
#if 0
// Paper: first GPU version
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
const cupp::deviceT::vector< Vec3 > positions = positions_;
cupp::deviceT::vector< int > find_neighbours_result = find_neighbours_result_;
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
int i=0;
while (i < number_of_boids && neighbours_found<neighbour_size_max) {
const Vec3 offset = position - positions[i];
float const d2 = offset.lengthSquared();
if (d2<r2 && i!=my_index) {
if ( d2 > max_neighbour_distance ) {
max_neighbour_distance = d2;
max_neighbour_distance_index = neighbours_found;
}
result[neighbours_found] = i;
++neighbours_found;
}
++i;
}
while (i < number_of_boids) {
const Vec3 offset = position - positions[i];
float const d2 = offset.lengthSquared();
if (d2<r2 && d2 < max_neighbour_distance && i != my_index) {
result[ max_neighbour_distance_index ] = i;
max_neighbour_distance = d2; // just temporary
for ( int i = 0; i < neighbour_size_max; ++i ) {
float const dist = ( position - positions[ result[i] ] ).lengthSquared();
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = i;
}
}
}
++i;
}
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[my_index*neighbour_size_max + i] = result[i];
}
}
#endif
// paper version with shared memory
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< OpenSteer::deviceT::Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
	// using shared memory here reduces the number of registers needed, which results
// in the possibility of more blocks per multiprocessor and a higher occupancy of each multiprocessor
__shared__ unsigned int number_of_boids;
number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
float result_distance[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base < number_of_boids; base+=threads_per_block) {
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
int i=0;
#pragma unroll 64
while (i < threads_per_block) {
const Vec3 offset = position - s_positions[i];
const float d2 = offset.lengthSquared();
const int cur_index = base + i;
if (d2 < r2 && cur_index != my_index) {
if (neighbours_found < neighbour_size_max) {
result[neighbours_found] = cur_index;
result_distance[neighbours_found] = d2;
++neighbours_found;
} else {
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
for ( int j = 0; j < neighbour_size_max; ++j ) {
const float dist = result_distance[j];
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
if (max_neighbour_distance>d2) {
result[max_neighbour_distance_index] = cur_index;
result_distance[max_neighbour_distance_index] = d2;
}
}
}
++i;
}
__syncthreads();
}
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#if 0
// called V5
// fewer registers, but slower performance compared to the shared memory paper version
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ unsigned int number_of_boids;
number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
float result_distance[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base < number_of_boids; base+=threads_per_block) {
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
int i=0;
while (i < threads_per_block) {
const Vec3 offset = position - s_positions[i];
float const d2 = offset.lengthSquared();
const int cur_index = base + i;
if (d2 < r2 && cur_index != my_index && neighbours_found < neighbour_size_max) {
result[neighbours_found] = cur_index;
result_distance[neighbours_found] = d2;
if (d2 > max_neighbour_distance) {
max_neighbour_distance = d2;
max_neighbour_distance_index = neighbours_found;
}
++neighbours_found;
}
if (d2 < r2 && cur_index != my_index && max_neighbour_distance>d2 && neighbours_found >= neighbour_size_max) {
result[max_neighbour_distance_index] = cur_index;
result_distance[max_neighbour_distance_index] = d2;
for ( int j = 0; j < neighbour_size_max; ++j ) {
const float dist = result_distance[j];
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
}
++i;
}
__syncthreads();
}
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#endif
find_neighbours_kernelT get_find_neighbours_kernel() {
return (find_neighbours_kernelT)find_neighbours;
}
| 29984ca10ef9f607762148428a107784a584280b.cu | #include "cupp/deviceT/vector.h"
#include "cupp/common.h"
#include "OpenSteer/deviceT/Vec3.h"
#include "OpenSteer/CuPPConfig.h"
#include "OpenSteer/kernels.h"
using OpenSteer::deviceT::Vec3;
#if 0
// called V3
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base < number_of_boids; base+=threads_per_block) {
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
int i=0;
while (i < threads_per_block) {
const Vec3 offset = position - s_positions[i];
float const d2 = offset.lengthSquared();
const int cur_index = base + i;
if (d2 < r2 && cur_index != my_index) {
if (neighbours_found < neighbour_size_max) {
result[neighbours_found] = cur_index;
++neighbours_found;
} else {
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
for ( int j = 0; j < neighbour_size_max; ++j ) {
float const dist = ( position - positions[ result[j] ] ).lengthSquared();
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
if (max_neighbour_distance>d2) {
result[max_neighbour_distance_index] = cur_index;
}
}
}
++i;
}
__syncthreads();
}
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#endif
#if 0
// called V2
// the one with the strange behavior (fast on second run)
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
	// using shared memory here reduces the number of registers needed, which results
// in the possibility of more blocks per multiprocessor and a higher occupancy of each multiprocessor
__shared__ int number_of_boids;
number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
float result_distance[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
int max_neighbour_distance_index = 0;
float max_neighbour_distance = 0.0f;
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base<number_of_boids; base+=threads_per_block) {
// read positions from global to shared memory
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
for (int i=0; i<threads_per_block; ++i) {
const float d2 = (position - s_positions[i]).lengthSquared();
// fill variables with dummy values we can write in every cycle
int cur_index = result[0];
int result_index = 0;
float cur_neighbour_distance = result_distance[0];
if (d2 < r2 && (base + i != my_index) ) {
if (neighbours_found < neighbour_size_max) {
cur_neighbour_distance = d2;
cur_index = base + i;
result_index = neighbours_found;
if (max_neighbour_distance<d2) {
max_neighbour_distance = d2;
}
++neighbours_found;
} else {
if (d2 < max_neighbour_distance) {
cur_neighbour_distance = d2;
cur_index = base + i;
result_index = max_neighbour_distance_index;
}
}
}
// write result
result [result_index] = cur_index;
result_distance[result_index] = cur_neighbour_distance;
// update max_neighbour_distance & index
if (d2 < max_neighbour_distance) {
for (int j=0; j<neighbour_size_max; ++j) {
if (result_distance[j] > max_neighbour_distance) {
max_neighbour_distance = result_distance[j];
max_neighbour_distance_index = j;
}
}
}
}
__syncthreads();
}
// write the result to global memory
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#endif
#if 0
// Paper: first GPU version
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
const cupp::deviceT::vector< Vec3 > positions = positions_;
cupp::deviceT::vector< int > find_neighbours_result = find_neighbours_result_;
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
int i=0;
while (i < number_of_boids && neighbours_found<neighbour_size_max) {
const Vec3 offset = position - positions[i];
float const d2 = offset.lengthSquared();
if (d2<r2 && i!=my_index) {
if ( d2 > max_neighbour_distance ) {
max_neighbour_distance = d2;
max_neighbour_distance_index = neighbours_found;
}
result[neighbours_found] = i;
++neighbours_found;
}
++i;
}
while (i < number_of_boids) {
const Vec3 offset = position - positions[i];
float const d2 = offset.lengthSquared();
if (d2<r2 && d2 < max_neighbour_distance && i != my_index) {
result[ max_neighbour_distance_index ] = i;
max_neighbour_distance = d2; // just temporary
for ( int i = 0; i < neighbour_size_max; ++i ) {
float const dist = ( position - positions[ result[i] ] ).lengthSquared();
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = i;
}
}
}
++i;
}
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[my_index*neighbour_size_max + i] = result[i];
}
}
#endif
// paper version with shared memory
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< OpenSteer::deviceT::Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
	// using shared memory here reduces the number of registers needed, which results
// in the possibility of more blocks per multiprocessor and a higher occupancy of each multiprocessor
__shared__ unsigned int number_of_boids;
number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
float result_distance[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base < number_of_boids; base+=threads_per_block) {
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
int i=0;
#pragma unroll 64
while (i < threads_per_block) {
const Vec3 offset = position - s_positions[i];
const float d2 = offset.lengthSquared();
const int cur_index = base + i;
if (d2 < r2 && cur_index != my_index) {
if (neighbours_found < neighbour_size_max) {
result[neighbours_found] = cur_index;
result_distance[neighbours_found] = d2;
++neighbours_found;
} else {
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
for ( int j = 0; j < neighbour_size_max; ++j ) {
const float dist = result_distance[j];
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
if (max_neighbour_distance>d2) {
result[max_neighbour_distance_index] = cur_index;
result_distance[max_neighbour_distance_index] = d2;
}
}
}
++i;
}
__syncthreads();
}
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#if 0
// called V5
// fewer registers, but slower performance compared to the shared memory paper version
__global__ void find_neighbours (const cupp::deviceT::vector< Vec3 > &positions_,
const float r2,
cupp::deviceT::vector< int > &find_neighbours_result_)
{
// make local copies of our references
const cupp::deviceT::vector< Vec3 > positions(positions_);
cupp::deviceT::vector< int > find_neighbours_result(find_neighbours_result_);
// constants that are needed below
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ unsigned int number_of_boids;
number_of_boids = gridDim.x*blockDim.x; // number of boids == number of threads
int neighbours_found = 0;
const Vec3 position = positions[my_index];
int result[neighbour_size_max];
float result_distance[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
result[i]=-1;
}
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
__shared__ Vec3 s_positions[threads_per_block];
for (int base=0; base < number_of_boids; base+=threads_per_block) {
s_positions[threadIdx.x] = positions[base + threadIdx.x];
__syncthreads();
int i=0;
while (i < threads_per_block) {
const Vec3 offset = position - s_positions[i];
float const d2 = offset.lengthSquared();
const int cur_index = base + i;
if (d2 < r2 && cur_index != my_index && neighbours_found < neighbour_size_max) {
result[neighbours_found] = cur_index;
result_distance[neighbours_found] = d2;
if (d2 > max_neighbour_distance) {
max_neighbour_distance = d2;
max_neighbour_distance_index = neighbours_found;
}
++neighbours_found;
}
if (d2 < r2 && cur_index != my_index && max_neighbour_distance>d2 && neighbours_found >= neighbour_size_max) {
result[max_neighbour_distance_index] = cur_index;
result_distance[max_neighbour_distance_index] = d2;
for ( int j = 0; j < neighbour_size_max; ++j ) {
const float dist = result_distance[j];
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
}
++i;
}
__syncthreads();
}
const int result_index_base = my_index*neighbour_size_max;
for (int i=0; i<neighbour_size_max; ++i) {
find_neighbours_result[result_index_base + i] = result[i];
}
}
#endif
find_neighbours_kernelT get_find_neighbours_kernel() {
return (find_neighbours_kernelT)find_neighbours;
}
|
9e793e876b7c7567f612e0d0c9f2bba1085e3341.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
if (comp >= -0.0f + var_4 - var_5 / -1.5162E34f) {
for (int i=0; i < var_2; ++i) {
float tmp_1 = -1.9481E34f;
float tmp_2 = +1.3990E-37f;
float tmp_3 = +1.9859E34f;
comp = tmp_3 * tmp_2 + tmp_1 * coshf(-1.5712E35f * -1.9142E-35f);
for (int i=0; i < var_3; ++i) {
comp = (var_6 + +0.0f + (var_7 / logf((+1.9649E20f - atanf((+1.1659E-37f + fabsf(-0.0f / (var_8 + var_9 - +1.8125E35f))))))));
comp = -1.3785E18f + var_10;
comp = tanhf(+1.0128E-41f);
}
if (comp < (var_11 - var_12)) {
float tmp_4 = +1.7497E35f;
float tmp_5 = -0.0f;
comp = tmp_5 / tmp_4 / (var_13 - (var_14 / floorf(atan2f(-1.0374E-44f, atan2f(-1.6392E-41f - var_15 / -0.0f / atanf(var_16 + (+1.0754E35f - var_17 - var_18 * +0.0f)), -1.1466E35f + var_19 * atanf(var_20 / (var_21 / var_22 - floorf(-1.0355E-44f + (+1.3409E-22f * (+1.2695E-35f / -1.2097E34f / -1.5281E-20f))))))))));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
hipDeviceSynchronize();
return 0;
}
| 9e793e876b7c7567f612e0d0c9f2bba1085e3341.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
if (comp >= -0.0f + var_4 - var_5 / -1.5162E34f) {
for (int i=0; i < var_2; ++i) {
float tmp_1 = -1.9481E34f;
float tmp_2 = +1.3990E-37f;
float tmp_3 = +1.9859E34f;
comp = tmp_3 * tmp_2 + tmp_1 * coshf(-1.5712E35f * -1.9142E-35f);
for (int i=0; i < var_3; ++i) {
comp = (var_6 + +0.0f + (var_7 / logf((+1.9649E20f - atanf((+1.1659E-37f + fabsf(-0.0f / (var_8 + var_9 - +1.8125E35f))))))));
comp = -1.3785E18f + var_10;
comp = tanhf(+1.0128E-41f);
}
if (comp < (var_11 - var_12)) {
float tmp_4 = +1.7497E35f;
float tmp_5 = -0.0f;
comp = tmp_5 / tmp_4 / (var_13 - (var_14 / floorf(atan2f(-1.0374E-44f, atan2f(-1.6392E-41f - var_15 / -0.0f / atanf(var_16 + (+1.0754E35f - var_17 - var_18 * +0.0f)), -1.1466E35f + var_19 * atanf(var_20 / (var_21 / var_22 - floorf(-1.0355E-44f + (+1.3409E-22f * (+1.2695E-35f / -1.2097E34f / -1.5281E-20f))))))))));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
cudaDeviceSynchronize();
return 0;
}
|
605737a2765c48f983b1183ed4c99e59899072a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/degridding_layer.hpp"
namespace caffe {
////////////////////////////////////////////////////////////////////////////////////////////
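// Naming convention suggested by the kernel bodies below: "aoxpb2y" computes y from
// a o x + b (weights a applied to x window by window, plus an optional bias b), and
// "xoypb2a" accumulates the weight gradient a from x and y. The c/h/w triplets denote
// channel/height/width extents of the data, the kernel (k) and the stride (s).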
template <typename Dtype>
__global__ void incomplete_gridding_gpu_aoxpb2y(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
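// Output extent per axis: w_z counts placements where the kernel fits fully; w_y adds
// one extra output column when a leftover partial window still covers unvisited input
// (h and c are handled the same way).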
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * min(w_k, w_s) + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * min(h_k, h_s) + min(h_k, h_x - (h_y - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = y_w * w_s;
const int b_h = y_h * h_s;
const int b_c = y_c * c_s;
const int e_w = min(b_w + w_k, w_x);
const int e_h = min(b_h + h_k, h_x);
const int e_c = min(b_c + c_k, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c * min(c_k, c_s) + x_c - b_c;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h * min(h_k, h_s) + x_h - b_h;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w * min(w_k, w_s) + x_w - b_w;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void incomplete_degridding_gpu_aoxpb2y(const int n,
const int c_y, const int h_y, const int w_y,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_y - w_k) / w_s + 1;
const int h_z = max(0, h_y - h_k) / h_s + 1;
const int c_z = max(0, c_y - c_k) / c_s + 1;
const int w_x = w_z + (max(0, w_y - w_k) % w_s && w_z * w_s < w_y);
const int h_x = h_z + (max(0, h_y - h_k) % h_s && h_z * h_s < h_y);
const int c_x = c_z + (max(0, c_y - c_k) % c_s && c_z * c_s < c_y);
const int w_a = (w_x - 1) * min(w_k, w_s) + min(w_k, w_y - (w_x - 1) * w_s);
const int h_a = (h_x - 1) * min(h_k, h_s) + min(h_k, h_y - (h_x - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = (y_w < w_k) ? 0 : ((y_w - w_k) / w_s + 1);
const int b_h = (y_h < h_k) ? 0 : ((y_h - h_k) / h_s + 1);
const int b_c = (y_c < c_k) ? 0 : ((y_c - c_k) / c_s + 1);
const int e_w = min(y_w / w_s + 1, w_x);
const int e_h = min(y_h / h_s + 1, h_x);
const int e_c = min(y_c / c_s + 1, c_x);
const int a_w = y_w - y_w / w_s * max(0, w_s - w_k);
const int a_h = y_h - y_h / h_s * max(0, h_s - h_k);
const int a_c = y_c - y_c / c_s * max(0, c_s - c_k);
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
for (int x_h = b_h; x_h < e_h; ++x_h) {
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void incomplete_degridding_gpu_xoypb2a(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* x, const Dtype* y, const Dtype* b, Dtype* a) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * min(w_k, w_s) + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * min(h_k, h_s) + min(h_k, h_x - (h_y - 1) * h_s);
const int c_a = (c_y - 1) * min(c_k, c_s) + min(c_k, c_x - (c_y - 1) * c_s);
const int m_a = c_a * h_a * w_a;
CUDA_KERNEL_LOOP(a_i, m_a) {
const int a_w = a_i % w_a;
const int a_h = a_i / w_a % h_a;
const int a_c = a_i / w_a / h_a;
const int x_w = (w_s < w_k) ? a_w : (a_w / w_k * w_s + a_w % w_k);
const int x_h = (h_s < h_k) ? a_h : (a_h / h_k * h_s + a_h % h_k);
const int x_c = (c_s < c_k) ? a_c : (a_c / c_k * c_s + a_c % c_k);
const int b_w = (x_w < w_k) ? 0 : ((x_w - w_k) / w_s + 1);
const int b_h = (x_h < h_k) ? 0 : ((x_h - h_k) / h_s + 1);
const int b_c = (x_c < c_k) ? 0 : ((x_c - c_k) / c_s + 1);
const int e_w = min(x_w / w_s + 1, w_y);
const int e_h = min(x_h / h_s + 1, h_y);
const int e_c = min(x_c / c_s + 1, c_y);
a[a_i] = b ? b[a_i] : 0;
for (int y_n = 0; y_n < n; ++y_n) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
for (int y_c = b_c; y_c < e_c; ++y_c) {
for (int y_h = b_h; y_h < e_h; ++y_h) {
for (int y_w = b_w; y_w < e_w; ++y_w) {
const int y_i = ((y_n * c_y + y_c) * h_y + y_h) * w_y + y_w;
a[a_i] += x[x_i] * y[y_i];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void complete_gridding_gpu_aoxpb2y(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * w_k + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * h_k + min(h_k, h_x - (h_y - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = y_w * w_s;
const int b_h = y_h * h_s;
const int b_c = y_c * c_s;
const int e_w = min(b_w + w_k, w_x);
const int e_h = min(b_h + h_k, h_x);
const int e_c = min(b_c + c_k, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c * c_k + x_c - b_c;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h * h_k + x_h - b_h;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w * w_k + x_w - b_w;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void complete_degridding_gpu_aoxpb2y(const int n,
const int c_y, const int h_y, const int w_y,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_y - w_k) / w_s + 1;
const int h_z = max(0, h_y - h_k) / h_s + 1;
const int c_z = max(0, c_y - c_k) / c_s + 1;
const int w_x = w_z + (max(0, w_y - w_k) % w_s && w_z * w_s < w_y);
const int h_x = h_z + (max(0, h_y - h_k) % h_s && h_z * h_s < h_y);
const int c_x = c_z + (max(0, c_y - c_k) % c_s && c_z * c_s < c_y);
const int w_a = (w_x - 1) * w_k + min(w_k, w_y - (w_x - 1) * w_s);
const int h_a = (h_x - 1) * h_k + min(h_k, h_y - (h_x - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = (y_w < w_k) ? 0 : ((y_w - w_k) / w_s + 1);
const int b_h = (y_h < h_k) ? 0 : ((y_h - h_k) / h_s + 1);
const int b_c = (y_c < c_k) ? 0 : ((y_c - c_k) / c_s + 1);
const int e_w = min(y_w / w_s + 1, w_x);
const int e_h = min(y_h / h_s + 1, h_x);
const int e_c = min(y_c / c_s + 1, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c + x_c * (c_k - c_s);
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h + x_h * (h_k - h_s);
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w + x_w * (w_k - w_s);
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void complete_degridding_gpu_xoypb2a(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* x, const Dtype* y, const Dtype* b, Dtype* a) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * w_k + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * h_k + min(h_k, h_x - (h_y - 1) * h_s);
const int c_a = (c_y - 1) * c_k + min(c_k, c_x - (c_y - 1) * c_s);
const int m_a = c_a * h_a * w_a;
CUDA_KERNEL_LOOP(a_i, m_a) {
const int a_w = a_i % w_a;
const int a_h = a_i / w_a % h_a;
const int a_c = a_i / w_a / h_a;
const int y_w = a_w / w_k;
const int y_h = a_h / h_k;
const int y_c = a_c / c_k;
const int x_w = y_w * w_s + a_w % w_k;
const int x_h = y_h * h_s + a_h % h_k;
const int x_c = y_c * c_s + a_c % c_k;
a[a_i] = b ? b[a_i] : 0;
for (int y_n = 0; y_n < n; ++y_n) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
const int y_i = ((y_n * c_y + y_c) * h_y + y_h) * w_y + y_w;
a[a_i] += x[x_i] * y[y_i];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void shared_gridding_gpu_aoxpb2y(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = min(w_k, w_x);
const int h_a = min(h_k, h_x);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = y_w * w_s;
const int b_h = y_h * h_s;
const int b_c = y_c * c_s;
const int e_w = min(b_w + w_k, w_x);
const int e_h = min(b_h + h_k, h_x);
const int e_c = min(b_c + c_k, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = x_c - b_c;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = x_h - b_h;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = x_w - b_w;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void shared_degridding_gpu_aoxpb2y(const int n,
const int c_y, const int h_y, const int w_y,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_y - w_k) / w_s + 1;
const int h_z = max(0, h_y - h_k) / h_s + 1;
const int c_z = max(0, c_y - c_k) / c_s + 1;
const int w_x = w_z + (max(0, w_y - w_k) % w_s && w_z * w_s < w_y);
const int h_x = h_z + (max(0, h_y - h_k) % h_s && h_z * h_s < h_y);
const int c_x = c_z + (max(0, c_y - c_k) % c_s && c_z * c_s < c_y);
const int w_a = min(w_k, w_y);
const int h_a = min(h_k, h_y);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = (y_w < w_k) ? 0 : ((y_w - w_k) / w_s + 1);
const int b_h = (y_h < h_k) ? 0 : ((y_h - h_k) / h_s + 1);
const int b_c = (y_c < c_k) ? 0 : ((y_c - c_k) / c_s + 1);
const int e_w = min(y_w / w_s + 1, w_x);
const int e_h = min(y_h / h_s + 1, h_x);
const int e_c = min(y_c / c_s + 1, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c - x_c * c_s;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h - x_h * h_s;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w - x_w * w_s;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void shared_degridding_gpu_xoypb2a(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* x, const Dtype* y, const Dtype* b, Dtype* a) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = min(w_k, w_x);
const int h_a = min(h_k, h_x);
const int c_a = min(c_k, c_x);
const int m_a = c_a * h_a * w_a;
CUDA_KERNEL_LOOP(a_i, m_a) {
const int a_w = a_i % w_a;
const int a_h = a_i / w_a % h_a;
const int a_c = a_i / w_a / h_a;
a[a_i] = b ? b[a_i] : 0;
for (int y_c = 0, x_c = a_c; x_c < c_x; x_c += c_s, ++y_c) {
for (int y_h = 0, x_h = a_h; x_h < h_x; x_h += h_s, ++y_h) {
for (int y_w = 0, x_w = a_w; x_w < w_x; x_w += w_s, ++y_w) {
for (int y_n = 0; y_n < n; ++y_n) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
const int y_i = ((y_n * c_y + y_c) * h_y + y_h) * w_y + y_w;
a[a_i] += x[x_i] * y[y_i];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
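// caffe_gpu_multi_add sums x over the leading batch dimension n (plus an optional
// bias b) into y; it is used below to accumulate the bias gradient across the batch.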
template <typename Dtype>
__global__ void caffe_gpu_multi_add(const int n,
const int c_x, const int h_x, const int w_x,
const Dtype* x, const Dtype* b, Dtype* y) {
const int m_y = c_x * h_x * w_x;
CUDA_KERNEL_LOOP(y_i, m_y) {
y[y_i] = b ? b[y_i] : 0;
for (int x_n = 0; x_n < n; ++x_n) {
const int x_i = x_n * m_y + y_i;
y[y_i] += x[x_i];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
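// Dispatch summary: Forward applies the selected *_degridding_gpu_aoxpb2y kernel;
// Backward uses the matching *_gridding_gpu_aoxpb2y kernel to push the top diff back
// to the bottom blob, *_degridding_gpu_xoypb2a to accumulate the weight diff, and
// caffe_gpu_multi_add for the bias diff.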
template <typename Dtype>
void DegriddingLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const bool preforward_flag) {
const Dtype* bias = bias_term_ ? this->blobs_[1]->gpu_data() : 0;
if (degridding_ == "incomplete_degridding") {
hipLaunchKernelGGL(( incomplete_degridding_gpu_aoxpb2y<Dtype>), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), bottom[0]->gpu_data(), bias, top[0]->mutable_gpu_data());
}
else if (degridding_ == "complete_degridding") {
hipLaunchKernelGGL(( complete_degridding_gpu_aoxpb2y<Dtype>), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), bottom[0]->gpu_data(), bias, top[0]->mutable_gpu_data());
}
else if (degridding_ == "shared_degridding") {
hipLaunchKernelGGL(( shared_degridding_gpu_aoxpb2y<Dtype>), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), bottom[0]->gpu_data(), bias, top[0]->mutable_gpu_data());
}
}
template <typename Dtype>
void DegriddingLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom,
const bool prebackward_flag) {
const Dtype* top_diff = top[0]->gpu_diff();
if (bias_term_ && this->param_propagate_down_[1]) {
hipLaunchKernelGGL(( caffe_gpu_multi_add<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
top_diff, 0, this->blobs_[1]->mutable_gpu_diff());
}
if (this->param_propagate_down_[0]) {
if (degridding_ == "incomplete_degridding") {
hipLaunchKernelGGL(( incomplete_degridding_gpu_xoypb2a<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
top_diff, bottom[0]->gpu_data(), 0, this->blobs_[0]->mutable_gpu_diff());
}
else if (degridding_ == "complete_degridding") {
hipLaunchKernelGGL(( complete_degridding_gpu_xoypb2a<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
top_diff, bottom[0]->gpu_data(), 0, this->blobs_[0]->mutable_gpu_diff());
}
else if (degridding_ == "shared_degridding") {
hipLaunchKernelGGL(( shared_degridding_gpu_xoypb2a<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
top_diff, bottom[0]->gpu_data(), 0, this->blobs_[0]->mutable_gpu_diff());
}
}
if (propagate_down[0]) {
if (degridding_ == "incomplete_degridding") {
hipLaunchKernelGGL(( incomplete_gridding_gpu_aoxpb2y<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), top_diff, 0, bottom[0]->mutable_gpu_diff());
}
else if (degridding_ == "complete_degridding") {
hipLaunchKernelGGL(( complete_gridding_gpu_aoxpb2y<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), top_diff, 0, bottom[0]->mutable_gpu_diff());
}
else if (degridding_ == "shared_degridding") {
hipLaunchKernelGGL(( shared_gridding_gpu_aoxpb2y<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), top_diff, 0, bottom[0]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DegriddingLayer);
} // namespace caffe | 605737a2765c48f983b1183ed4c99e59899072a0.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/degridding_layer.hpp"
namespace caffe {
////////////////////////////////////////////////////////////////////////////////////////////
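// Naming convention suggested by the kernel bodies below: "aoxpb2y" computes y from
// a o x + b (weights a applied to x window by window, plus an optional bias b), and
// "xoypb2a" accumulates the weight gradient a from x and y. The c/h/w triplets denote
// channel/height/width extents of the data, the kernel (k) and the stride (s).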
template <typename Dtype>
__global__ void incomplete_gridding_gpu_aoxpb2y(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
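// Output extent per axis: w_z counts placements where the kernel fits fully; w_y adds
// one extra output column when a leftover partial window still covers unvisited input
// (h and c are handled the same way).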
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * min(w_k, w_s) + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * min(h_k, h_s) + min(h_k, h_x - (h_y - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = y_w * w_s;
const int b_h = y_h * h_s;
const int b_c = y_c * c_s;
const int e_w = min(b_w + w_k, w_x);
const int e_h = min(b_h + h_k, h_x);
const int e_c = min(b_c + c_k, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c * min(c_k, c_s) + x_c - b_c;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h * min(h_k, h_s) + x_h - b_h;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w * min(w_k, w_s) + x_w - b_w;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void incomplete_degridding_gpu_aoxpb2y(const int n,
const int c_y, const int h_y, const int w_y,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_y - w_k) / w_s + 1;
const int h_z = max(0, h_y - h_k) / h_s + 1;
const int c_z = max(0, c_y - c_k) / c_s + 1;
const int w_x = w_z + (max(0, w_y - w_k) % w_s && w_z * w_s < w_y);
const int h_x = h_z + (max(0, h_y - h_k) % h_s && h_z * h_s < h_y);
const int c_x = c_z + (max(0, c_y - c_k) % c_s && c_z * c_s < c_y);
const int w_a = (w_x - 1) * min(w_k, w_s) + min(w_k, w_y - (w_x - 1) * w_s);
const int h_a = (h_x - 1) * min(h_k, h_s) + min(h_k, h_y - (h_x - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = (y_w < w_k) ? 0 : ((y_w - w_k) / w_s + 1);
const int b_h = (y_h < h_k) ? 0 : ((y_h - h_k) / h_s + 1);
const int b_c = (y_c < c_k) ? 0 : ((y_c - c_k) / c_s + 1);
const int e_w = min(y_w / w_s + 1, w_x);
const int e_h = min(y_h / h_s + 1, h_x);
const int e_c = min(y_c / c_s + 1, c_x);
const int a_w = y_w - y_w / w_s * max(0, w_s - w_k);
const int a_h = y_h - y_h / h_s * max(0, h_s - h_k);
const int a_c = y_c - y_c / c_s * max(0, c_s - c_k);
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
for (int x_h = b_h; x_h < e_h; ++x_h) {
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void incomplete_degridding_gpu_xoypb2a(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* x, const Dtype* y, const Dtype* b, Dtype* a) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * min(w_k, w_s) + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * min(h_k, h_s) + min(h_k, h_x - (h_y - 1) * h_s);
const int c_a = (c_y - 1) * min(c_k, c_s) + min(c_k, c_x - (c_y - 1) * c_s);
const int m_a = c_a * h_a * w_a;
CUDA_KERNEL_LOOP(a_i, m_a) {
const int a_w = a_i % w_a;
const int a_h = a_i / w_a % h_a;
const int a_c = a_i / w_a / h_a;
const int x_w = (w_s < w_k) ? a_w : (a_w / w_k * w_s + a_w % w_k);
const int x_h = (h_s < h_k) ? a_h : (a_h / h_k * h_s + a_h % h_k);
const int x_c = (c_s < c_k) ? a_c : (a_c / c_k * c_s + a_c % c_k);
const int b_w = (x_w < w_k) ? 0 : ((x_w - w_k) / w_s + 1);
const int b_h = (x_h < h_k) ? 0 : ((x_h - h_k) / h_s + 1);
const int b_c = (x_c < c_k) ? 0 : ((x_c - c_k) / c_s + 1);
const int e_w = min(x_w / w_s + 1, w_y);
const int e_h = min(x_h / h_s + 1, h_y);
const int e_c = min(x_c / c_s + 1, c_y);
a[a_i] = b ? b[a_i] : 0;
for (int y_n = 0; y_n < n; ++y_n) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
for (int y_c = b_c; y_c < e_c; ++y_c) {
for (int y_h = b_h; y_h < e_h; ++y_h) {
for (int y_w = b_w; y_w < e_w; ++y_w) {
const int y_i = ((y_n * c_y + y_c) * h_y + y_h) * w_y + y_w;
a[a_i] += x[x_i] * y[y_i];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void complete_gridding_gpu_aoxpb2y(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * w_k + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * h_k + min(h_k, h_x - (h_y - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = y_w * w_s;
const int b_h = y_h * h_s;
const int b_c = y_c * c_s;
const int e_w = min(b_w + w_k, w_x);
const int e_h = min(b_h + h_k, h_x);
const int e_c = min(b_c + c_k, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c * c_k + x_c - b_c;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h * h_k + x_h - b_h;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w * w_k + x_w - b_w;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void complete_degridding_gpu_aoxpb2y(const int n,
const int c_y, const int h_y, const int w_y,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_y - w_k) / w_s + 1;
const int h_z = max(0, h_y - h_k) / h_s + 1;
const int c_z = max(0, c_y - c_k) / c_s + 1;
const int w_x = w_z + (max(0, w_y - w_k) % w_s && w_z * w_s < w_y);
const int h_x = h_z + (max(0, h_y - h_k) % h_s && h_z * h_s < h_y);
const int c_x = c_z + (max(0, c_y - c_k) % c_s && c_z * c_s < c_y);
const int w_a = (w_x - 1) * w_k + min(w_k, w_y - (w_x - 1) * w_s);
const int h_a = (h_x - 1) * h_k + min(h_k, h_y - (h_x - 1) * h_s);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = (y_w < w_k) ? 0 : ((y_w - w_k) / w_s + 1);
const int b_h = (y_h < h_k) ? 0 : ((y_h - h_k) / h_s + 1);
const int b_c = (y_c < c_k) ? 0 : ((y_c - c_k) / c_s + 1);
const int e_w = min(y_w / w_s + 1, w_x);
const int e_h = min(y_h / h_s + 1, h_x);
const int e_c = min(y_c / c_s + 1, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c + x_c * (c_k - c_s);
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h + x_h * (h_k - h_s);
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w + x_w * (w_k - w_s);
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void complete_degridding_gpu_xoypb2a(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* x, const Dtype* y, const Dtype* b, Dtype* a) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = (w_y - 1) * w_k + min(w_k, w_x - (w_y - 1) * w_s);
const int h_a = (h_y - 1) * h_k + min(h_k, h_x - (h_y - 1) * h_s);
const int c_a = (c_y - 1) * c_k + min(c_k, c_x - (c_y - 1) * c_s);
const int m_a = c_a * h_a * w_a;
CUDA_KERNEL_LOOP(a_i, m_a) {
const int a_w = a_i % w_a;
const int a_h = a_i / w_a % h_a;
const int a_c = a_i / w_a / h_a;
const int y_w = a_w / w_k;
const int y_h = a_h / h_k;
const int y_c = a_c / c_k;
const int x_w = y_w * w_s + a_w % w_k;
const int x_h = y_h * h_s + a_h % h_k;
const int x_c = y_c * c_s + a_c % c_k;
a[a_i] = b ? b[a_i] : 0;
for (int y_n = 0; y_n < n; ++y_n) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
const int y_i = ((y_n * c_y + y_c) * h_y + y_h) * w_y + y_w;
a[a_i] += x[x_i] * y[y_i];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void shared_gridding_gpu_aoxpb2y(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = min(w_k, w_x);
const int h_a = min(h_k, h_x);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = y_w * w_s;
const int b_h = y_h * h_s;
const int b_c = y_c * c_s;
const int e_w = min(b_w + w_k, w_x);
const int e_h = min(b_h + h_k, h_x);
const int e_c = min(b_c + c_k, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = x_c - b_c;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = x_h - b_h;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = x_w - b_w;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void shared_degridding_gpu_aoxpb2y(const int n,
const int c_y, const int h_y, const int w_y,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y) {
const int w_z = max(0, w_y - w_k) / w_s + 1;
const int h_z = max(0, h_y - h_k) / h_s + 1;
const int c_z = max(0, c_y - c_k) / c_s + 1;
const int w_x = w_z + (max(0, w_y - w_k) % w_s && w_z * w_s < w_y);
const int h_x = h_z + (max(0, h_y - h_k) % h_s && h_z * h_s < h_y);
const int c_x = c_z + (max(0, c_y - c_k) % c_s && c_z * c_s < c_y);
const int w_a = min(w_k, w_y);
const int h_a = min(h_k, h_y);
const int m_y = n * c_y * h_y * w_y;
CUDA_KERNEL_LOOP(y_i, m_y) {
const int y_w = y_i % w_y;
const int y_h = y_i / w_y % h_y;
const int y_c = y_i / w_y / h_y % c_y;
const int y_n = y_i / w_y / h_y / c_y;
const int b_w = (y_w < w_k) ? 0 : ((y_w - w_k) / w_s + 1);
const int b_h = (y_h < h_k) ? 0 : ((y_h - h_k) / h_s + 1);
const int b_c = (y_c < c_k) ? 0 : ((y_c - c_k) / c_s + 1);
const int e_w = min(y_w / w_s + 1, w_x);
const int e_h = min(y_h / h_s + 1, h_x);
const int e_c = min(y_c / c_s + 1, c_x);
const int b_i = y_i % (c_y * h_y * w_y);
y[y_i] = b ? b[b_i] : 0;
for (int x_c = b_c; x_c < e_c; ++x_c) {
const int a_c = y_c - x_c * c_s;
for (int x_h = b_h; x_h < e_h; ++x_h) {
const int a_h = y_h - x_h * h_s;
for (int x_w = b_w; x_w < e_w; ++x_w) {
const int a_w = y_w - x_w * w_s;
const int a_i = (a_c * h_a + a_h) * w_a + a_w;
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
y[y_i] += a[a_i] * x[x_i];
}
}
}
}
}
template <typename Dtype>
__global__ void shared_degridding_gpu_xoypb2a(const int n,
const int c_x, const int h_x, const int w_x,
const int c_k, const int h_k, const int w_k,
const int c_s, const int h_s, const int w_s,
const Dtype* x, const Dtype* y, const Dtype* b, Dtype* a) {
const int w_z = max(0, w_x - w_k) / w_s + 1;
const int h_z = max(0, h_x - h_k) / h_s + 1;
const int c_z = max(0, c_x - c_k) / c_s + 1;
const int w_y = w_z + (max(0, w_x - w_k) % w_s && w_z * w_s < w_x);
const int h_y = h_z + (max(0, h_x - h_k) % h_s && h_z * h_s < h_x);
const int c_y = c_z + (max(0, c_x - c_k) % c_s && c_z * c_s < c_x);
const int w_a = min(w_k, w_x);
const int h_a = min(h_k, h_x);
const int c_a = min(c_k, c_x);
const int m_a = c_a * h_a * w_a;
CUDA_KERNEL_LOOP(a_i, m_a) {
const int a_w = a_i % w_a;
const int a_h = a_i / w_a % h_a;
const int a_c = a_i / w_a / h_a;
a[a_i] = b ? b[a_i] : 0;
for (int y_c = 0, x_c = a_c; x_c < c_x; x_c += c_s, ++y_c) {
for (int y_h = 0, x_h = a_h; x_h < h_x; x_h += h_s, ++y_h) {
for (int y_w = 0, x_w = a_w; x_w < w_x; x_w += w_s, ++y_w) {
for (int y_n = 0; y_n < n; ++y_n) {
const int x_i = ((y_n * c_x + x_c) * h_x + x_h) * w_x + x_w;
const int y_i = ((y_n * c_y + y_c) * h_y + y_h) * w_y + y_w;
a[a_i] += x[x_i] * y[y_i];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
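// caffe_gpu_multi_add sums x over the leading batch dimension n (plus an optional
// bias b) into y; it is used below to accumulate the bias gradient across the batch.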
template <typename Dtype>
__global__ void caffe_gpu_multi_add(const int n,
const int c_x, const int h_x, const int w_x,
const Dtype* x, const Dtype* b, Dtype* y) {
const int m_y = c_x * h_x * w_x;
CUDA_KERNEL_LOOP(y_i, m_y) {
y[y_i] = b ? b[y_i] : 0;
for (int x_n = 0; x_n < n; ++x_n) {
const int x_i = x_n * m_y + y_i;
y[y_i] += x[x_i];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////
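// Dispatch summary: Forward applies the selected *_degridding_gpu_aoxpb2y kernel;
// Backward uses the matching *_gridding_gpu_aoxpb2y kernel to push the top diff back
// to the bottom blob, *_degridding_gpu_xoypb2a to accumulate the weight diff, and
// caffe_gpu_multi_add for the bias diff.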
template <typename Dtype>
void DegriddingLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const bool preforward_flag) {
const Dtype* bias = bias_term_ ? this->blobs_[1]->gpu_data() : 0;
if (degridding_ == "incomplete_degridding") {
incomplete_degridding_gpu_aoxpb2y<Dtype><<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), bottom[0]->gpu_data(), bias, top[0]->mutable_gpu_data());
}
else if (degridding_ == "complete_degridding") {
complete_degridding_gpu_aoxpb2y<Dtype><<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), bottom[0]->gpu_data(), bias, top[0]->mutable_gpu_data());
}
else if (degridding_ == "shared_degridding") {
shared_degridding_gpu_aoxpb2y<Dtype><<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), bottom[0]->gpu_data(), bias, top[0]->mutable_gpu_data());
}
}
template <typename Dtype>
void DegriddingLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom,
const bool prebackward_flag) {
const Dtype* top_diff = top[0]->gpu_diff();
if (bias_term_ && this->param_propagate_down_[1]) {
caffe_gpu_multi_add<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
top_diff, 0, this->blobs_[1]->mutable_gpu_diff());
}
if (this->param_propagate_down_[0]) {
if (degridding_ == "incomplete_degridding") {
incomplete_degridding_gpu_xoypb2a<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
top_diff, bottom[0]->gpu_data(), 0, this->blobs_[0]->mutable_gpu_diff());
}
else if (degridding_ == "complete_degridding") {
complete_degridding_gpu_xoypb2a<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
top_diff, bottom[0]->gpu_data(), 0, this->blobs_[0]->mutable_gpu_diff());
}
else if (degridding_ == "shared_degridding") {
shared_degridding_gpu_xoypb2a<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
top_diff, bottom[0]->gpu_data(), 0, this->blobs_[0]->mutable_gpu_diff());
}
}
if (propagate_down[0]) {
if (degridding_ == "incomplete_degridding") {
incomplete_gridding_gpu_aoxpb2y<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), top_diff, 0, bottom[0]->mutable_gpu_diff());
}
else if (degridding_ == "complete_degridding") {
complete_gridding_gpu_aoxpb2y<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), top_diff, 0, bottom[0]->mutable_gpu_diff());
}
else if (degridding_ == "shared_degridding") {
shared_gridding_gpu_aoxpb2y<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->shape(0), top[0]->shape(1), top[0]->shape(2), top[0]->shape(3),
kernel_c_, kernel_h_, kernel_w_, stride_c_, stride_h_, stride_w_,
this->blobs_[0]->gpu_data(), top_diff, 0, bottom[0]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DegriddingLayer);
} // namespace caffe |
5ba9e5fa47104229213d0116aca68f0d7cabdae8.hip | // !!! This is a file automatically generated by hipify!!!
#include <Device/cudaOverlayGraph.cuh>
#include <../config.cuh>
//#include "Kernels/WorkEfficient_KernelDispath.cu"
#include <vector>
#include <random>
using std::vector;
// void cudaGNRGraph::FrontierDebug(const int FSize, const int level)
// {
// if (FSize > max_frontier_size)
// __ERROR("Device memory not sufficient to contain the vertices frontier");
// if (CUDA_DEBUG)
// {
// //__CUDA_ERROR("BellmanFord Host");
// // std::cout << "level: " << level << "\tF2Size: " << FSize << std::endl;
// if (CUDA_DEBUG >= 2)
// {
// if (level <= DEBUG_LEVEL)
// {
// node_t *tmpF1 = new node_t[graph.V * 10];
// hipMemcpy(tmpF1, devF1, FSize * sizeof(node_t), hipMemcpyDeviceToHost);
// printf("\n%s=%d\t", "cuda_frontier_level:", level);
// printExt::host::printArray(tmpF1, FSize, " ");
// delete[] tmpF1;
// }
// }
// }
// }
namespace cuda_graph
{
void cudaGNRGraph::WorkEfficient(GraphWeight &graph)
{
//GraphSSSP& graph = (GraphSSSP&)gw;
long long int totalEdges = 0;
float totalTime = 0;
std::cout.setf(std::ios::fixed | std::ios::left);
timer::Timer<timer::HOST> TM_H;
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<> distribution(0, graph.V);
int maxFrontier = std::numeric_limits<int>::min();
std::vector<int> host_frontiers;
int *Sources = new int[1];
if (CHECK_RESULT)
{
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue_init();
}
int needCount = 0;
for (int i = 0; i < N_OF_TESTS; i++)
{
// Sources[0] = {N_OF_TESTS == 1 ? 0 : distribution(generator)};
//Sources[0] = 160413;
Sources[0] = TEST_NODES[i];
// printfInt(gp.Orders[Sources[0]],"source_order");
// if(gp.Orders[TEST_NODES[i]]>10 || gp.Orders[TEST_NODES[i]]<5)
// {
// continue;
// }
// if(needCount >= 10)
// break;
// needCount++;
timer_cuda::Timer<timer_cuda::DEVICE> TM_D;
int edgeTraversed = graph.E;
// if (CHECK_TRAVERSED_EDGES)
// {
// graph.BFS_Init();
// graph.BFS(Sources[0]);
// edgeTraversed = graph.BFS_visitedEdges();
// graph.BFS_Reset();
// if (edgeTraversed == 0 || (float)graph.E / edgeTraversed < 0.1f)
// {
// i--;
// std::cout << "EdgeTraversed:" << edgeTraversed
// << " -> Repeat" << std::endl;
// continue;
// }
// }
//printf("host start\n");
if (CHECK_RESULT)
{
//std::cout << "Computing Host Bellman-Ford..." << std::endl;
if (CUDA_DEBUG)
{
//dynamic_cast<GraphSSSP&>(graph).BoostDijkstra(Sources[0]);
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Frontier(Sources[0], host_frontiers);
printf("host_frontiers_size = ");
for (int i = 0; i < (int)host_frontiers.size(); i++)
std::cout << " " << host_frontiers[i];
std::cout << std::endl;
host_frontiers.resize(0);
}
else
{
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue(Sources[0]);
}
}
printf("cuda start\n");
printf("source id:%d\n", Sources[0]);
TM_D.start();
GNRSearchMain(Sources[0]);
__CUDA_ERROR("BellmanFord Kernel");
TM_D.stop();
//======================================================================
float time = TM_D.duration();
totalTime += time;
__CUDA_ERROR("BellmanFord Kernel");
totalEdges += edgeTraversed;
if (N_OF_TESTS > 1)
std::cout << "iter: " << std::setw(10) << i
<< "\ttime: " << std::setw(10) << time
<< "\tEdges: " << std::setw(10) << edgeTraversed
<< "\tsource: " << Sources[0] << std::endl;
if (CHECK_RESULT)
{
// printf("the %d test is ok", i);
dist_t *Dist = dynamic_cast<GraphSSSP &>(graph).BellmanFord_Result();
// if (CUDA_DEBUG >= 3)
// {
// printExt::host::printArray(Dist, graph.V, "host_distance:");
// printExt::host::printArray(devArray, graph.V, "cuda_distance:");
// delete[] devArray;
// }
vector<int> devDist(V);
copyDistance(devDist);
//printf("the %d test is ok", i);
int count_error = 0;
for (int j = 0; j < V; j++)
{
if (devDist[j] != Dist[j])
{
count_error++; // exit(-1);
// printf("%d not equal, dev:%d, host:%d, diff:%d\n", j, devDist[j], Dist[j], devDist[j] - Dist[j]);
}
}
printf("test %d mismatch rate: %f\n", i, (float)count_error / (float)V);
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue_reset();
//TODO chl
}
/*std::cout << "reset start" << std::endl;
std::cout << "reset end" << std::endl;*/
}
if (CHECK_RESULT)
{
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue_end();
}
std::cout << std::endl
<< "\tNumber of TESTS: " << N_OF_TESTS << std::endl
<< "\t Avg. Time: " << totalTime / N_OF_TESTS << " ms" << std::endl
<< "\t Avg. MTEPS: " << totalEdges / (totalTime * 1000) << std::endl
<< "\t maxFrontier: " << maxFrontier << std::endl
<< std::endl;
}
} // namespace cuda_graph
| 5ba9e5fa47104229213d0116aca68f0d7cabdae8.cu | #include <Device/cudaOverlayGraph.cuh>
#include <../config.cuh>
//#include "Kernels/WorkEfficient_KernelDispath.cu"
#include <vector>
#include <random>
using std::vector;
// void cudaGNRGraph::FrontierDebug(const int FSize, const int level)
// {
// if (FSize > max_frontier_size)
// __ERROR("Device memory not sufficient to contain the vertices frontier");
// if (CUDA_DEBUG)
// {
// //__CUDA_ERROR("BellmanFord Host");
// // std::cout << "level: " << level << "\tF2Size: " << FSize << std::endl;
// if (CUDA_DEBUG >= 2)
// {
// if (level <= DEBUG_LEVEL)
// {
// node_t *tmpF1 = new node_t[graph.V * 10];
// cudaMemcpy(tmpF1, devF1, FSize * sizeof(node_t), cudaMemcpyDeviceToHost);
// printf("\n%s=%d\t", "cuda_frontier_level:", level);
// printExt::host::printArray(tmpF1, FSize, " ");
// delete[] tmpF1;
// }
// }
// }
// }
namespace cuda_graph
{
void cudaGNRGraph::WorkEfficient(GraphWeight &graph)
{
//GraphSSSP& graph = (GraphSSSP&)gw;
long long int totalEdges = 0;
float totalTime = 0;
std::cout.setf(std::ios::fixed | std::ios::left);
timer::Timer<timer::HOST> TM_H;
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<> distribution(0, graph.V);
int maxFrontier = std::numeric_limits<int>::min();
std::vector<int> host_frontiers;
int *Sources = new int[1];
if (CHECK_RESULT)
{
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue_init();
}
int needCount = 0;
for (int i = 0; i < N_OF_TESTS; i++)
{
// Sources[0] = {N_OF_TESTS == 1 ? 0 : distribution(generator)};
//Sources[0] = 160413;
Sources[0] = TEST_NODES[i];
// printfInt(gp.Orders[Sources[0]],"source_order");
// if(gp.Orders[TEST_NODES[i]]>10 || gp.Orders[TEST_NODES[i]]<5)
// {
// continue;
// }
// if(needCount >= 10)
// break;
// needCount++;
timer_cuda::Timer<timer_cuda::DEVICE> TM_D;
int edgeTraversed = graph.E;
// if (CHECK_TRAVERSED_EDGES)
// {
// graph.BFS_Init();
// graph.BFS(Sources[0]);
// edgeTraversed = graph.BFS_visitedEdges();
// graph.BFS_Reset();
// if (edgeTraversed == 0 || (float)graph.E / edgeTraversed < 0.1f)
// {
// i--;
// std::cout << "EdgeTraversed:" << edgeTraversed
// << " -> Repeat" << std::endl;
// continue;
// }
// }
//printf("host start\n");
if (CHECK_RESULT)
{
//std::cout << "Computing Host Bellman-Ford..." << std::endl;
if (CUDA_DEBUG)
{
//dynamic_cast<GraphSSSP&>(graph).BoostDijkstra(Sources[0]);
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Frontier(Sources[0], host_frontiers);
printf("host_frontiers_size = ");
for (int i = 0; i < (int)host_frontiers.size(); i++)
std::cout << " " << host_frontiers[i];
std::cout << std::endl;
host_frontiers.resize(0);
}
else
{
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue(Sources[0]);
}
}
printf("cuda start\n");
printf("source id:%d\n", Sources[0]);
TM_D.start();
GNRSearchMain(Sources[0]);
__CUDA_ERROR("BellmanFord Kernel");
TM_D.stop();
//======================================================================
float time = TM_D.duration();
totalTime += time;
__CUDA_ERROR("BellmanFord Kernel");
totalEdges += edgeTraversed;
if (N_OF_TESTS > 1)
std::cout << "iter: " << std::setw(10) << i
<< "\ttime: " << std::setw(10) << time
<< "\tEdges: " << std::setw(10) << edgeTraversed
<< "\tsource: " << Sources[0] << std::endl;
if (CHECK_RESULT)
{
// printf("the %d test is ok", i);
dist_t *Dist = dynamic_cast<GraphSSSP &>(graph).BellmanFord_Result();
// if (CUDA_DEBUG >= 3)
// {
// printExt::host::printArray(Dist, graph.V, "host_distance:");
// printExt::host::printArray(devArray, graph.V, "cuda_distance:");
// delete[] devArray;
// }
vector<int> devDist(V);
copyDistance(devDist);
//printf("the %d test is ok", i);
int count_error = 0;
for (int j = 0; j < V; j++)
{
if (devDist[j] != Dist[j])
{
count_error++; // exit(-1);
// printf("%d not equal, dev:%d, host:%d, diff:%d\n", j, devDist[j], Dist[j], devDist[j] - Dist[j]);
}
}
printf("test %d mismatch rate: %f\n", i, (float)count_error / (float)V);
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue_reset();
//TODO chl
}
/*std::cout << "reset start" << std::endl;
std::cout << "reset end" << std::endl;*/
}
if (CHECK_RESULT)
{
dynamic_cast<GraphSSSP &>(graph).BellmanFord_Queue_end();
}
std::cout << std::endl
<< "\tNumber of TESTS: " << N_OF_TESTS << std::endl
<< "\t Avg. Time: " << totalTime / N_OF_TESTS << " ms" << std::endl
<< "\t Avg. MTEPS: " << totalEdges / (totalTime * 1000) << std::endl
<< "\t maxFrontier: " << maxFrontier << std::endl
<< std::endl;
}
} // namespace cuda_graph
|
9b3f3f2b626f05b8f86c84fbda1531b95c409220.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* cuCorrFrequency.cu
* define a class to save FFT plans and intermediate data for cross correlation in frequency domain
*/
#include "cuCorrFrequency.h"
#include "cuAmpcorUtil.h"
cuFreqCorrelator::cuFreqCorrelator(int imageNX, int imageNY, int nImages, hipStream_t stream_)
{
int imageSize = imageNX*imageNY;
int fImageSize = imageNX*(imageNY/2+1);
int n[NRANK] ={imageNX, imageNY};
cufft_Error(hipfftPlanMany(&forwardPlan, NRANK, n,
NULL, 1, imageSize,
NULL, 1, fImageSize,
HIPFFT_R2C, nImages));
cufft_Error(hipfftPlanMany(&backwardPlan, NRANK, n,
NULL, 1, fImageSize,
NULL, 1, imageSize,
HIPFFT_C2R, nImages));
stream = stream_;
hipfftSetStream(forwardPlan, stream);
hipfftSetStream(backwardPlan, stream);
workFM = new cuArrays<float2>(imageNX, (imageNY/2+1), nImages);
workFM->allocate();
workFS = new cuArrays<float2>(imageNX, (imageNY/2+1), nImages);
workFS->allocate();
workT = new cuArrays<float> (imageNX, imageNY, nImages);
workT->allocate();
}
cuFreqCorrelator::~cuFreqCorrelator()
{
cufft_Error(hipfftDestroy(forwardPlan));
cufft_Error(hipfftDestroy(backwardPlan));
workFM->deallocate();
workFS->deallocate();
workT->deallocate();
}
void cuFreqCorrelator::execute(cuArrays<float> *templates, cuArrays<float> *images, cuArrays<float> *results)
{
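// Frequency-domain cross-correlation via the correlation theorem: pad the template to
// the image size, form conj(FFT(template)) * FFT(image) scaled by 1/size (the FFTs are
// unnormalized), inverse-transform, and extract the correlation surface into results.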
cuArraysCopyPadded(templates, workT, stream);
cufft_Error(hipfftExecR2C(forwardPlan, workT->devData, workFM->devData));
cufft_Error(hipfftExecR2C(forwardPlan, images->devData, workFS->devData));
float coef = 1.0/(images->size);
cuArraysElementMultiplyConjugate(workFM, workFS, coef, stream);
cufft_Error(hipfftExecC2R(backwardPlan, workFM->devData, workT->devData));
cuArraysCopyExtract(workT, results, make_int2(0, 0), stream);
//workT->outputToFile("test",stream);
}
__global__ void cudaKernel_elementMulC(float2 *ainout, float2 *bin, size_t size)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
hipComplex prod;
prod = cuCmulf(ainout[idx], bin[idx]);
ainout [idx] = prod;
}
}
void cuArraysElementMultiply(cuArrays<float2> *image1, cuArrays<float2> *image2, hipStream_t stream)
{
int size = image1->getSize();
int threadsperblock = NTHREADS;
int blockspergrid = IDIVUP (size, threadsperblock);
hipLaunchKernelGGL(( cudaKernel_elementMulC), dim3(blockspergrid), dim3(threadsperblock), 0, stream, image1->devData, image2->devData, size );
getLastCudaError("cuArraysElementMultiply error\n");
}
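// cuMulConj returns conj(a) * b; it is used by cudaKernel_elementMulConjugate below
// to build the cross-power spectrum.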
inline __device__ float2 cuMulConj(float2 a, float2 b)
{
return make_float2(a.x*b.x + a.y*b.y, -a.y*b.x + a.x*b.y);
}
__global__ void cudaKernel_elementMulConjugate(float2 *ainout, float2 *bin, size_t size, float coef)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
hipComplex prod;
prod = cuMulConj(ainout[idx], bin[idx]);
ainout [idx] = prod*coef;
}
}
void cuArraysElementMultiplyConjugate(cuArrays<float2> *image1, cuArrays<float2> *image2, float coef, hipStream_t stream)
{
int size = image1->getSize();
int threadsperblock = NTHREADS;
int blockspergrid = IDIVUP (size, threadsperblock);
hipLaunchKernelGGL(( cudaKernel_elementMulConjugate), dim3(blockspergrid), dim3(threadsperblock), 0, stream, image1->devData, image2->devData, size, coef );
getLastCudaError("cuArraysElementMultiply error\n");
}
| 9b3f3f2b626f05b8f86c84fbda1531b95c409220.cu | /*
* cuCorrFrequency.cu
* define a class to save FFT plans and intermediate data for cross correlation in frequency domain
*/
#include "cuCorrFrequency.h"
#include "cuAmpcorUtil.h"
cuFreqCorrelator::cuFreqCorrelator(int imageNX, int imageNY, int nImages, cudaStream_t stream_)
{
int imageSize = imageNX*imageNY;
int fImageSize = imageNX*(imageNY/2+1);
int n[NRANK] ={imageNX, imageNY};
cufft_Error(cufftPlanMany(&forwardPlan, NRANK, n,
NULL, 1, imageSize,
NULL, 1, fImageSize,
CUFFT_R2C, nImages));
cufft_Error(cufftPlanMany(&backwardPlan, NRANK, n,
NULL, 1, fImageSize,
NULL, 1, imageSize,
CUFFT_C2R, nImages));
stream = stream_;
cufftSetStream(forwardPlan, stream);
cufftSetStream(backwardPlan, stream);
workFM = new cuArrays<float2>(imageNX, (imageNY/2+1), nImages);
workFM->allocate();
workFS = new cuArrays<float2>(imageNX, (imageNY/2+1), nImages);
workFS->allocate();
workT = new cuArrays<float> (imageNX, imageNY, nImages);
workT->allocate();
}
cuFreqCorrelator::~cuFreqCorrelator()
{
cufft_Error(cufftDestroy(forwardPlan));
cufft_Error(cufftDestroy(backwardPlan));
workFM->deallocate();
workFS->deallocate();
workT->deallocate();
}
void cuFreqCorrelator::execute(cuArrays<float> *templates, cuArrays<float> *images, cuArrays<float> *results)
{
cuArraysCopyPadded(templates, workT, stream);
cufft_Error(cufftExecR2C(forwardPlan, workT->devData, workFM->devData));
cufft_Error(cufftExecR2C(forwardPlan, images->devData, workFS->devData));
float coef = 1.0/(images->size);
cuArraysElementMultiplyConjugate(workFM, workFS, coef, stream);
cufft_Error(cufftExecC2R(backwardPlan, workFM->devData, workT->devData));
cuArraysCopyExtract(workT, results, make_int2(0, 0), stream);
//workT->outputToFile("test",stream);
}
__global__ void cudaKernel_elementMulC(float2 *ainout, float2 *bin, size_t size)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
cuComplex prod;
prod = cuCmulf(ainout[idx], bin[idx]);
ainout [idx] = prod;
}
}
void cuArraysElementMultiply(cuArrays<float2> *image1, cuArrays<float2> *image2, cudaStream_t stream)
{
int size = image1->getSize();
int threadsperblock = NTHREADS;
int blockspergrid = IDIVUP (size, threadsperblock);
cudaKernel_elementMulC<<<blockspergrid, threadsperblock, 0, stream>>>(image1->devData, image2->devData, size );
getLastCudaError("cuArraysElementMultiply error\n");
}
inline __device__ float2 cuMulConj(float2 a, float2 b)
{
return make_float2(a.x*b.x + a.y*b.y, -a.y*b.x + a.x*b.y);
}
__global__ void cudaKernel_elementMulConjugate(float2 *ainout, float2 *bin, size_t size, float coef)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
cuComplex prod;
prod = cuMulConj(ainout[idx], bin[idx]);
ainout [idx] = prod*coef;
}
}
void cuArraysElementMultiplyConjugate(cuArrays<float2> *image1, cuArrays<float2> *image2, float coef, cudaStream_t stream)
{
int size = image1->getSize();
int threadsperblock = NTHREADS;
int blockspergrid = IDIVUP (size, threadsperblock);
cudaKernel_elementMulConjugate<<<blockspergrid, threadsperblock, 0, stream>>>(image1->devData, image2->devData, size, coef );
getLastCudaError("cuArraysElementMultiply error\n");
}
|
99e42cfbf230f357d157b29f8b67c55a33c50178.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <hipfft.h>
#include "../spreadinterp.h"
#include "../memtransfer.h"
#include "../deconvolve.h"
#include "../cufinufft.h"
using namespace std;
int cufinufft2d1_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
2D Type-1 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: spread data to oversampled regular mesh using kernel
Step 2: compute FFT on uniform mesh
Step 3: deconvolve by division of each Fourier mode independently by the
Fourier series coefficient of the kernel.
Melody Shih 07/25/19
*/
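// In formula form, type 1 evaluates f[k1,k2] = sum_j c[j] * exp(iflag * i * (k1*x[j] + k2*y[j]))
// over all modes (k1,k2) of the ms x mt output grid, via spreading + FFT instead of the direct sum.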
{
assert(d_plan->spopts.spread_direction == 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
for(int i=0; i*d_plan->ntransfcufftplan < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->ntransfcufftplan,
d_plan->ntransfcufftplan);
d_cstart = d_c + i*d_plan->ntransfcufftplan*d_plan->M;
d_fkstart = d_fk + i*d_plan->ntransfcufftplan*d_plan->ms*d_plan->mt;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
checkCudaErrors(hipMemset(d_plan->fw,0,d_plan->ntransfcufftplan*
d_plan->nf1*d_plan->nf2*sizeof(CUCPX)));// this is needed
#ifdef TIME
float milliseconds = 0;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tInitialize fw to 0\t %.3g s\n",
milliseconds/1000);
#endif
// Step 1: Spread
hipEventRecord(start);
ier = cuspread2d(d_plan,blksize);
if(ier != 0 ){
printf("error: cuspread2d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tSpread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
// Step 2: FFT
hipEventRecord(start);
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 3: deconvolve and shuffle
hipEventRecord(start);
cudeconvolve2d(d_plan,blksize);
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tDeconvolve\t\t %.3g s\n", milliseconds/1000);
#endif
}
return ier;
}
int cufinufft2d2_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
2D Type-2 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: deconvolve (amplify) each Fourier mode, dividing by kernel
Fourier coeff
Step 2: compute FFT on uniform mesh
Step 3: interpolate data to regular mesh
Melody Shih 07/25/19
*/
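// In formula form, type 2 evaluates c[j] = sum_{k1,k2} f[k1,k2] * exp(iflag * i * (k1*x[j] + k2*y[j]))
// at each nonuniform point j, i.e. the reverse data flow of type 1.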
{
assert(d_plan->spopts.spread_direction == 2);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
for(int i=0; i*d_plan->ntransfcufftplan < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->ntransfcufftplan,
d_plan->ntransfcufftplan);
d_cstart = d_c + i*d_plan->ntransfcufftplan*d_plan->M;
d_fkstart = d_fk + i*d_plan->ntransfcufftplan*d_plan->ms*d_plan->mt;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
// Step 1: amplify Fourier coeffs fk and copy into upsampled array fw
hipEventRecord(start);
cudeconvolve2d(d_plan,blksize);
#ifdef TIME
float milliseconds = 0;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tAmplify & Copy fktofw\t %.3g s\n", milliseconds/1000);
#endif
// Step 2: FFT
hipDeviceSynchronize();
hipEventRecord(start);
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 3: deconvolve and shuffle
hipEventRecord(start);
ier = cuinterp2d(d_plan, blksize);
if(ier != 0 ){
printf("error: cuinterp2d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tUnspread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
}
return ier;
}
| 99e42cfbf230f357d157b29f8b67c55a33c50178.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <cufft.h>
#include "../spreadinterp.h"
#include "../memtransfer.h"
#include "../deconvolve.h"
#include "../cufinufft.h"
using namespace std;
int cufinufft2d1_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
2D Type-1 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: spread data to oversampled regular mesh using kernel
Step 2: compute FFT on uniform mesh
Step 3: deconvolve by division of each Fourier mode independently by the
Fourier series coefficient of the kernel.
Melody Shih 07/25/19
*/
{
assert(d_plan->spopts.spread_direction == 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
for(int i=0; i*d_plan->ntransfcufftplan < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->ntransfcufftplan,
d_plan->ntransfcufftplan);
d_cstart = d_c + i*d_plan->ntransfcufftplan*d_plan->M;
d_fkstart = d_fk + i*d_plan->ntransfcufftplan*d_plan->ms*d_plan->mt;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
checkCudaErrors(cudaMemset(d_plan->fw,0,d_plan->ntransfcufftplan*
d_plan->nf1*d_plan->nf2*sizeof(CUCPX)));// this is needed
#ifdef TIME
float milliseconds = 0;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tInitialize fw to 0\t %.3g s\n",
milliseconds/1000);
#endif
// Step 1: Spread
cudaEventRecord(start);
ier = cuspread2d(d_plan,blksize);
if(ier != 0 ){
printf("error: cuspread2d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tSpread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
// Step 2: FFT
cudaEventRecord(start);
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 3: deconvolve and shuffle
cudaEventRecord(start);
cudeconvolve2d(d_plan,blksize);
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tDeconvolve\t\t %.3g s\n", milliseconds/1000);
#endif
}
return ier;
}
int cufinufft2d2_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
2D Type-2 NUFFT
This function is called in "exec" stage (See ../cufinufft.cu).
It includes (copied from doc in finufft library)
Step 1: deconvolve (amplify) each Fourier mode, dividing by kernel
Fourier coeff
Step 2: compute FFT on uniform mesh
Step 3: interpolate data to regular mesh
Melody Shih 07/25/19
*/
{
assert(d_plan->spopts.spread_direction == 2);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int blksize;
int ier;
CUCPX* d_fkstart;
CUCPX* d_cstart;
for(int i=0; i*d_plan->ntransfcufftplan < d_plan->ntransf; i++){
blksize = min(d_plan->ntransf - i*d_plan->ntransfcufftplan,
d_plan->ntransfcufftplan);
d_cstart = d_c + i*d_plan->ntransfcufftplan*d_plan->M;
d_fkstart = d_fk + i*d_plan->ntransfcufftplan*d_plan->ms*d_plan->mt;
d_plan->c = d_cstart;
d_plan->fk = d_fkstart;
// Step 1: amplify Fourier coeffs fk and copy into upsampled array fw
cudaEventRecord(start);
cudeconvolve2d(d_plan,blksize);
#ifdef TIME
float milliseconds = 0;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tAmplify & Copy fktofw\t %.3g s\n", milliseconds/1000);
#endif
// Step 2: FFT
cudaDeviceSynchronize();
cudaEventRecord(start);
CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
// Step 3: deconvolve and shuffle
cudaEventRecord(start);
ier = cuinterp2d(d_plan, blksize);
if(ier != 0 ){
printf("error: cuinterp2d, method(%d)\n", d_plan->opts.gpu_method);
return 0;
}
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] \tUnspread (%d)\t\t %.3g s\n", milliseconds/1000,
d_plan->opts.gpu_method);
#endif
}
return ier;
}
|
068c745b7987e631fb8c055a7b0ad3835e7e0e6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "mycuda.h"
#include <iostream>
namespace cuda {
void SetDevice(int device) {
if (hipSetDevice(device) != hipSuccess) {
std::cerr << "hipSetDevice failed! Do you have a CUDA-capable GPU installed?";
throw "e31e20a";
}
}
void Malloc(void** devPtr, size_t bytes)
{
hipError_t cudaStatus = hipMalloc(devPtr, bytes);
if (cudaStatus != hipSuccess) {
std::cerr << "hipMalloc failed: " << hipGetErrorString(cudaStatus) << '\n';
throw "1e32d383";
}
}
void FillZero(void* devPtr, size_t bytes)
{
hipError_t cudaStatus = hipMemset(devPtr, 0, bytes);
if (cudaStatus != hipSuccess) {
std::cerr << "FillZero failed!: " << hipGetErrorString(cudaStatus) << '\n';
throw "798e95c";
}
}
void Memcpy(void* dst, const void* src, size_t bytes, hipMemcpyKind kind)
{
hipError_t cudaStatus = hipMemcpy(dst, src, bytes, kind);
if (cudaStatus != hipSuccess) {
std::cerr << "hipMemcpy failed: " << hipGetErrorString(cudaStatus) << '\n';
throw "ac621fd2";
}
}
void CheckLastError() {
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
std::cerr << "Cuda failed: " << hipGetErrorString(cudaStatus) << '\n';
throw "e502018d";
}
}
void DeviceSynchronize()
{
hipError_t cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
std::cerr << "hipDeviceSynchronize failed: " << hipGetErrorString(cudaStatus) << '\n';
throw "19149f41";
}
}
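// Typical call sequence for these wrappers (an illustrative sketch; d_buf, h_buf and n are placeholders):
//   cuda::SetDevice(0);
//   float* d_buf;
//   cuda::Malloc((void**)&d_buf, n * sizeof(float));
//   cuda::FillZero(d_buf, n * sizeof(float));
//   cuda::Memcpy(h_buf, d_buf, n * sizeof(float), hipMemcpyDeviceToHost);
//   cuda::DeviceSynchronize();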
}/* Namespace cuda */ | 068c745b7987e631fb8c055a7b0ad3835e7e0e6b.cu | #include "mycuda.h"
#include <iostream>
namespace cuda {
void SetDevice(int device) {
if (cudaSetDevice(device) != cudaSuccess) {
std::cerr << "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?";
throw "e31e20a";
}
}
void Malloc(void** devPtr, size_t bytes)
{
cudaError_t cudaStatus = cudaMalloc(devPtr, bytes);
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaMalloc failed: " << cudaGetErrorString(cudaStatus) << '\n';
throw "1e32d383";
}
}
void FillZero(void* devPtr, size_t bytes)
{
cudaError_t cudaStatus = cudaMemset(devPtr, 0, bytes);
if (cudaStatus != cudaSuccess) {
std::cerr << "FillZero failed!: " << cudaGetErrorString(cudaStatus) << '\n';
throw "798e95c";
}
}
void Memcpy(void* dst, const void* src, size_t bytes, cudaMemcpyKind kind)
{
cudaError_t cudaStatus = cudaMemcpy(dst, src, bytes, kind);
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaMemcpy failed: " << cudaGetErrorString(cudaStatus) << '\n';
throw "ac621fd2";
}
}
void CheckLastError() {
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
std::cerr << "Cuda failed: " << cudaGetErrorString(cudaStatus) << '\n';
throw "e502018d";
}
}
void DeviceSynchronize()
{
cudaError_t cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaDeviceSynchronize failed: " << cudaGetErrorString(cudaStatus) << '\n';
throw "19149f41";
}
}
}/* Namespace cuda */ |
dbd3b690fc96d9fb4e9fe61bf708f504cfd52122.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* An exercise on the different types of memory available in CUDA
*/
#include <iostream>
#include <cstdlib>
// Error checking macro function
#define myCudaCheck(result) { cudaErrorCheck((result), __FILE__, __LINE__); }
inline void cudaErrorCheck(hipError_t err, const char* file, int line)
{
if (err != hipSuccess) {
std::cerr << "CUDA error: " << hipGetErrorString(err) << " at " << file << ":" << line << std::endl;
exit(err);
}
}
// Array size
// HANDSON 2.1 Change the array size to a static __constant__ int
#define ARRAY_SIZE 65536
// CUDA threads per block
#define nThreads 128
// Array reversing kernel
__global__
void reverse(float* devA, float* devB)
{
// HANDSON 2.3 Create a __shared__ temporary array of length nThreads for the swap
// Get the index in this block
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// HANDSON 2.4 Fill the temporary array
// HANDSON 2.5 synchronize the threads
// HANDSON 2.6 Calculate the initial position of this block in the grid
// int blockOffset
// Reverse the elements
// HANDSON 2.7 Fill the output array with the reversed elements from this block
devB[idx] = devA[ARRAY_SIZE - (idx + 1)];
}
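// One possible shared-memory solution to the HANDSON steps above (an illustrative sketch only,
// not part of the exercise handout; the kernel name reverseShared is hypothetical):
__global__
void reverseShared(float* devA, float* devB)
{
// 2.3: per-block staging buffer in shared memory
__shared__ float tmp[nThreads];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// 2.4: load this block's slice of the input
tmp[threadIdx.x] = devA[idx];
// 2.5: wait until the whole block has loaded
__syncthreads();
// 2.6: first index handled by this block
int blockOffset = blockIdx.x * blockDim.x;
// 2.7: write this block's elements, reversed, into the mirrored block position
devB[ARRAY_SIZE - (blockOffset + blockDim.x) + threadIdx.x] = tmp[blockDim.x - 1 - threadIdx.x];
}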
// Main host function
int main( )
{
// HANDSON 2.2 Replace the host array size by a const int
// Here and elsewhere
// size of the array in char
size_t sizeChar = ARRAY_SIZE * sizeof(float);
// Allocate host memory
float* hostIn = (float*) malloc(sizeChar);
float* hostOut = (float*) malloc(sizeChar);
// Allocate device memory
float* devIn;
float* devOut;
myCudaCheck(
hipMalloc(&devIn, sizeChar)
);
myCudaCheck(
hipMalloc(&devOut, sizeChar)
);
// Initialize the arrays
for (int i = 0; i < ARRAY_SIZE; i++) {
hostIn[i] = i;
hostOut[i] = 0;
}
// Copy the input array from the host to the device
myCudaCheck(
hipMemcpy(devIn, hostIn, sizeChar, hipMemcpyHostToDevice)
);
// Define the size of the task
dim3 blocksPerGrid(ARRAY_SIZE/nThreads);
dim3 threadsPerBlock(nThreads);
hipLaunchKernelGGL(( reverse), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devIn, devOut);
// Wait for all threads to complete
myCudaCheck(
hipDeviceSynchronize()
);
// Copy the result array back to the host
myCudaCheck(
hipMemcpy(hostOut, devOut, sizeChar, hipMemcpyDeviceToHost)
);
// Check and print the result
int nCorrect = 0;
for (int i = 0; i < ARRAY_SIZE; i++) {
nCorrect += (hostOut[i] == hostIn[ARRAY_SIZE - (i+1)]) ? 1 : 0;
}
std::cout << ((nCorrect == ARRAY_SIZE) ? "Success! " : "Failure: ");
std::cout << nCorrect << " elements were correctly swapped." << std::endl;
// Free device memory
myCudaCheck(
hipFree(devIn)
);
myCudaCheck(
hipFree(devOut)
);
// Free host memory
free(hostIn);
free(hostOut);
return 0;
} | dbd3b690fc96d9fb4e9fe61bf708f504cfd52122.cu | /*
* An exercise on the different types of memory available in CUDA
*/
#include <iostream>
#include <cstdlib>
// Error checking macro function
#define myCudaCheck(result) { cudaErrorCheck((result), __FILE__, __LINE__); }
inline void cudaErrorCheck(cudaError_t err, const char* file, int line)
{
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(err) << " at " << file << ":" << line << std::endl;
exit(err);
}
}
// Array size
// HANDSON 2.1 Change the array size to a static __constant__ int
#define ARRAY_SIZE 65536
// CUDA threads per block
#define nThreads 128
// Array reversing kernel
__global__
void reverse(float* devA, float* devB)
{
// HANDSON 2.3 Create a __shared__ temporary array of length nThreads for the swap
// Get the index in this block
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// HANDSON 2.4 Fill the temporary array
// HANDSON 2.5 synchronize the threads
// HANDSON 2.6 Calculate the initial position of this block in the grid
// int blockOffset
// Reverse the elements
// HANDSON 2.7 Fill the output array with the reversed elements from this block
devB[idx] = devA[ARRAY_SIZE - (idx + 1)];
}
// Main host function
int main( )
{
// HANDSON 2.2 Replace the host array size by a const int
// Here and elsewhere
// size of the array in char
size_t sizeChar = ARRAY_SIZE * sizeof(float);
// Allocate host memory
float* hostIn = (float*) malloc(sizeChar);
float* hostOut = (float*) malloc(sizeChar);
// Allocate device memory
float* devIn;
float* devOut;
myCudaCheck(
cudaMalloc(&devIn, sizeChar)
);
myCudaCheck(
cudaMalloc(&devOut, sizeChar)
);
// Initialize the arrays
for (int i = 0; i < ARRAY_SIZE; i++) {
hostIn[i] = i;
hostOut[i] = 0;
}
// Copy the input array from the host to the device
myCudaCheck(
cudaMemcpy(devIn, hostIn, sizeChar, cudaMemcpyHostToDevice)
);
// Define the size of the task
dim3 blocksPerGrid(ARRAY_SIZE/nThreads);
dim3 threadsPerBlock(nThreads);
reverse<<<blocksPerGrid, threadsPerBlock>>>(devIn, devOut);
// Wait for all threads to complete
myCudaCheck(
cudaDeviceSynchronize()
);
// Copy the result array back to the host
myCudaCheck(
cudaMemcpy(hostOut, devOut, sizeChar, cudaMemcpyDeviceToHost)
);
// Check and print the result
int nCorrect = 0;
for (int i = 0; i < ARRAY_SIZE; i++) {
nCorrect += (hostOut[i] == hostIn[ARRAY_SIZE - (i+1)]) ? 1 : 0;
}
std::cout << ((nCorrect == ARRAY_SIZE) ? "Success! " : "Failure: ");
std::cout << nCorrect << " elements were correctly swapped." << std::endl;
// Free device memory
myCudaCheck(
cudaFree(devIn)
);
myCudaCheck(
cudaFree(devOut)
);
// Free host memory
free(hostIn);
free(hostOut);
return 0;
} |
c9ebd78eb0cc8964c0909098f2c91a8052824ff8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for(int i = 0; i < n ; i++)
y[i] = x[i] + y[i];
}
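// The kernel above is launched below with one block of one thread, so the loop runs serially on the GPU.
// A parallel variant (a sketch, not part of the original example) would use a grid-stride loop instead:
//   int i = blockIdx.x * blockDim.x + threadIdx.x;
//   int stride = blockDim.x * gridDim.x;
//   for (; i < n; i += stride) y[i] = x[i] + y[i];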
int main(void)
{
int N = 1<<20;
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
for(int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
hipDeviceSynchronize();
float maxError = 0.0f;
for(int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
//free
//delete [] x;
//delete [] y;
hipFree(x);
hipFree(y);
return 0;
}
| c9ebd78eb0cc8964c0909098f2c91a8052824ff8.cu | #include <iostream>
#include <math.h>
// function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for(int i = 0; i < n ; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
for(int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add<<<1, 1>>>(N, x, y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for(int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
//free
//delete [] x;
//delete [] y;
cudaFree(x);
cudaFree(y);
return 0;
}
|
fb11cb69183d14f9d661ee992425d74bc6b7d9fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <math.h>
#include <cusolverDn.h>
#include <rocblas.h>
#include <hip/hip_runtime_api.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Thread block size
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double computeGold(float* reference, float* idata, const unsigned int len);
#define DEFAULT_MATRIX_SIZE 1024
#define DEFAULT_THREADS_PER_BLOCK 128
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runSequentialTest(float* A, float* L, const unsigned int dimensionSize);
float runCUDATest_NormalS(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff);
float runCUDACholeskyByTheBook(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff);
float runCUDACholeskyByTheBookInPlaceM(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff);
float runCuSolverTest(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize);
void writeResultToFile(char* name, float* contentGPU, double* contentCPU, int LIST_SIZE);
float computeSyncSingleKarnelOneBlock(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize, const int threads_per_block);
////////////////////////////////////////////////////////////////////////////////
// matrix helper functions
float* spd_create_symetricf(unsigned int dimension, float minValue, float maxValue);
float* spd_make_positive_definitef(float* A, unsigned int dimension, float offset);
float* spd_create_blankf(unsigned int dimension);
float spd_random_float(float fMin, float fMax);
void spd_print_matrixf(float* A, unsigned int dimension, int count);
int spd_compare_matricesf(float* A, float* B, int dimension, float epsilon);
void spd_free_matrixf(float* A);
float* transpose(float* h_Adata, int dimensionSize);
////////////////////////////////////////////////////////////////////////////////
//! Cholesky Kernel for a single column. Normal Single Kernel
//! @param A input data in global memory
//! @param L output data in global memory
//! @param dimensionSize width of matrices
//! @param col current column
////////////////////////////////////////////////////////////////////////////////
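// The "by the book" kernels below compute one column `col` of the lower-triangular factor L per launch:
//   L[col][col] = sqrt( A[col][col] - sum_{k<col} L[col][k]^2 )
//   L[row][col] = ( A[row][col] - sum_{k<col} L[row][k]*L[col][k] ) / L[col][col]   for row > col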
__global__ void choleskyByTheBookSingleKarnel(float* A, float* L, int dimensionSize, int col){
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = col + tid;
int k;
float sum = 0;
float value;
float sum_d = 0;
if (tid == 0) {
// diagonal
for (k = 0; k < col; k++) {
sum_d += L[col * dimensionSize + k] * L[col * dimensionSize + k];
}
L[col * dimensionSize + col] = sqrtf(A[col * dimensionSize + col] - sum_d);
}
else {
// other elements
if (row < dimensionSize) {
for (k = 0; k < col; k++) {
sum += L[row * dimensionSize + k] * L[col * dimensionSize + k];
sum_d += L[col * dimensionSize + k] * L[col * dimensionSize + k];
}
value = sqrt(A[col * dimensionSize + col] - sum_d);
L[row * dimensionSize + col] = (1.0 / value * (A[row * dimensionSize + col] - sum));
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Cholesky Kernels for a single column. Normal Multiple Kernel
//! @param A input data in global memory
//! @param L output data in global memory
//! @param dimensionSize width of matrices
//! @param col current column
////////////////////////////////////////////////////////////////////////////////
__global__ void
choleskyByTheBookMKernelDiagonal(float* A, float* L, int dimensionSize, int col)
{
int k;
float sum_d = 0;
// diagonal
for (k = 0; k < col; k++) {
sum_d += L[col * dimensionSize + k] * L[col * dimensionSize + k];
}
L[col * dimensionSize + col] = sqrtf(A[col * dimensionSize + col] - sum_d);
}
__global__ void
choleskyByTheBookMKernelBelowDiagonal(float* A, float* L, int dimensionSize, int col)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = col + tid + 1;
int k;
float sum = 0;
// other elements
if (row < dimensionSize) {
for (k = 0; k < col; k++) {
sum += L[row * dimensionSize + k] * L[col * dimensionSize + k];
}
L[row * dimensionSize + col] = (1.0 / L[col * dimensionSize + col] *
(A[row * dimensionSize + col] - sum));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Cholesky Kernels for a single column. In-Place Multiple Kernel
//! @param A input/output data in global memory
//! @param dimensionSize width of matrices
//! @param col current column
////////////////////////////////////////////////////////////////////////////////
__global__ void
choleskyByTheBookInPlaceDiagonal(float* A, int dimensionSize, int col)
{
int k;
float sum_d = 0;
// diagonal
for (k = 0; k < col; k++) {
sum_d += A[col * dimensionSize + k] * A[col * dimensionSize + k];
}
A[col * dimensionSize + col] = sqrtf(A[col * dimensionSize + col] - sum_d);
}
__global__ void choleskyByTheBookInPlaceBelowDiagonal(float* A, int dimensionSize, int col)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = col + tid + 1;
int k;
float sum = 0;
// other elements
if (row < dimensionSize) {
for (k = 0; k < col; k++) {
sum += A[row * dimensionSize + k] * A[col * dimensionSize + k];
}
A[row * dimensionSize + col] = (1.0 / A[col * dimensionSize + col] *
(A[row * dimensionSize + col] - sum));
}
}
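// chol_kernel_one_block: a single thread block factors the matrix in place into the
// upper-triangular factor U (A = U^T * U) and zeroes the entries below the diagonal;
// the caller transposes the result afterwards to obtain L.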
template <int BLOCK_SIZE> __global__ void chol_kernel_one_block(float* U, unsigned int num_rows)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int tx = tid + bid ;
unsigned int i, j, k;
for (k = 0; k < num_rows; k++)
{
if (tx == 0)
{
U[k * num_rows + k] = sqrt(U[k * num_rows + k]);
for (j = (k + 1); j < num_rows; j++)
{
U[k * num_rows + j] /= U[k * num_rows + k];
}
}
__syncthreads();
for (i = (k + 1) + bid + tid; i < num_rows; i += BLOCK_SIZE )
{
for (j = i; j < num_rows; j++)
{
U[i * num_rows + j] -= U[k * num_rows + i] * U[k * num_rows + j];
}
}
__syncthreads();
}
__syncthreads();
for (i = bid + tid; i < num_rows; i += BLOCK_SIZE )
{
for (j = 0; j < i; j++)
U[i * num_rows + j] = 0.0;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int LIST_SIZE = 8;
char* name = (char*)malloc(20 * sizeof(char));
// Read from command line: size, algorithm (Sequential or CUDA), device
float* timersGPU = (float*)malloc(LIST_SIZE * sizeof(float));
double* timersCPU = (double*)malloc(LIST_SIZE * sizeof(double));
unsigned int algorithm = (argc >= 3) ?
atoi(argv[2]) :
0;
unsigned int threads_per_block = (argc >= 4) ?
atoi(argv[3]) :
DEFAULT_THREADS_PER_BLOCK;
unsigned int cutoff = (argc >= 5) ?
atoi(argv[4]) :
0;
unsigned int deviceId = (argc >= 6) ?
atoi(argv[5]) :
0;
// consistency of inputs
if ((algorithm == 0 || algorithm == 4) &&
(threads_per_block != DEFAULT_THREADS_PER_BLOCK || cutoff != 0))
return 0;
// check if tpb and max blocks are compatible with device
for (unsigned int index = 0, dimensionSize = 32; dimensionSize <= 1024; index++, dimensionSize *= 2) {
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, deviceId);
if (threads_per_block > devProp.maxThreadsPerBlock ||
(ceil((float)(dimensionSize) / (float)threads_per_block) > devProp.maxThreadsDim[0]))
return 0;
if (cutoff >= dimensionSize)
return 0; // if cutoff is greater or equals than the input size, cancel execution
// allocate and initialize host memory for input and output
float* h_Adata = spd_create_symetricf(dimensionSize, 1, 100);
spd_make_positive_definitef(h_Adata, dimensionSize, 50000);
float* h_Ldata = spd_create_blankf(dimensionSize);
// run test, depending on algorithm
switch (algorithm) {
// Sequential
case 0:
name = "Sequential";
printf("%d,Sequential,", dimensionSize);
runSequentialTest(h_Adata, h_Ldata, dimensionSize);
break;
// CUDA Normal Single Kernel
case 1:
name = "SingleKarnel";
printf("%d,CUDA_NormalS,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = runCUDATest_NormalS(h_Adata, h_Ldata, dimensionSize, threads_per_block, cutoff);
break;
// CUDA Normal Multiple Kernels
case 2:
name = "MultiKarnel";
printf("%d,CUDA_NormalM,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = runCUDACholeskyByTheBook(h_Adata, h_Ldata, dimensionSize, threads_per_block, cutoff);
break;
// CUDA InPlace Multiple Kernels
case 3:
name = "InPlaceMultiKarnel";
printf("%d,CUDA_InPlaceM,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = runCUDACholeskyByTheBookInPlaceM(h_Adata, h_Ldata, dimensionSize, threads_per_block, cutoff);
break;
// CuSolver
case 4:
name = "CUSOLVER";
printf("%d,CUSOLVER,", dimensionSize);
timersGPU[index] = runCuSolverTest(h_Adata, h_Ldata, dimensionSize);
break;
case 5:
name = "SyncChols";
printf("%d,SyncChols,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = computeSyncSingleKarnelOneBlock(h_Adata, h_Ldata, dimensionSize, threads_per_block);
h_Ldata = transpose(h_Ldata, dimensionSize);
break;
}
// compute reference solution
float* h_LGdata = spd_create_blankf(dimensionSize);
timersCPU[index] = computeGold(h_Adata, h_LGdata, dimensionSize);
printf("Input Matrix:\n");
spd_print_matrixf(h_Adata, dimensionSize, 16);
printf("Gold Matrix:\n");
spd_print_matrixf(h_LGdata, dimensionSize, 16);
printf("GPU Output Matrix:\n");
spd_print_matrixf(h_Ldata, dimensionSize, 16);
printf("Comparing ... ");
spd_compare_matricesf(h_Ldata, h_LGdata, dimensionSize, 0.001);
spd_free_matrixf(h_LGdata);
// free matrices
spd_free_matrixf(h_Adata);
spd_free_matrixf(h_Ldata);
}
writeResultToFile(name, timersGPU, timersCPU, LIST_SIZE);
// exit
exit(EXIT_SUCCESS);
}
void writeResultToFile(char* name, float* contentGPU, double* contentCPU,int LIST_SIZE) {
FILE* f = fopen(name, "a");
for (int i = 0; i < LIST_SIZE; i++) {
fprintf(f, "%f, %0.8f \n", contentGPU[i], contentCPU[i]);
}
fprintf(f, "\n");
fclose(f);
}
float* transpose(float* h_Adata,int dimensionSize) {
float* elements = (float*)malloc(dimensionSize * dimensionSize * sizeof(float));
for (int i = 0; i < dimensionSize; i++)
for (int j = 0; j < dimensionSize; j++)
elements[i * dimensionSize + j] = h_Adata[j * dimensionSize + i];
spd_free_matrixf(h_Adata);
return elements;
}
////////////////////////////////////////////////////////////////////////////////
//! Run Tequential test
////////////////////////////////////////////////////////////////////////////////
void runSequentialTest(float* A, float* L, const unsigned int dimensionSize)
{
// initialize timer
clock_t start, end;
double cpu_time_used;
double total_sum = 0;
start = clock();
int i, j, k;
float sum;
for (j = 0; j < dimensionSize; j++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[j * dimensionSize + k] * L[j * dimensionSize + k];
}
L[j * dimensionSize + j] = sqrt(A[j * dimensionSize + j] - sum);
for (i = j + 1; i < dimensionSize; i++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[i * dimensionSize + k] * L[j * dimensionSize + k];
}
L[i * dimensionSize + j] = (1.0 / L[j * dimensionSize + j] *
(A[i * dimensionSize + j] - sum));
}
}
// stop timer
end = clock();
cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
printf("%0.8f\n", cpu_time_used);
}
////////////////////////////////////////////////////////////////////////////////
//! Run CUDA test. Normal Single Kernel
////////////////////////////////////////////////////////////////////////////////
float runCUDATest_NormalS(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff)
{
// set device id
// initialize timer
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input
float* d_Adata;
gpuErrchk(hipMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(hipMemcpy(d_Adata, h_Adata, mem_size,
hipMemcpyHostToDevice));
// ... output
float* d_Ldata;
gpuErrchk(hipMalloc((void**)&d_Ldata, mem_size));
// execute the kernels
int j;
int num_blocks;
for (j = 0; j < dimensionSize - cutoff; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookSingleKarnel <<< num_blocks, threads_per_block >>> (d_Adata, d_Ldata, dimensionSize, j);
}
// check if kernel execution generated an error
// copy result from device to host
gpuErrchk(hipMemcpy(h_Ldata, d_Ldata, mem_size,
hipMemcpyDeviceToHost));
// Sequential part (based on cutoff)
float sum;
int i, k;
for (j = dimensionSize - cutoff; j < dimensionSize; j++) {
// Diagonal value
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[j * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[j * dimensionSize + j] = sqrt(h_Adata[j * dimensionSize + j] - sum);
// Calculate all other rows
for (i = j + 1; i < dimensionSize; i++) { // for each row below main diagonal
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[i * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[i * dimensionSize + j] = (1.0 / h_Ldata[j * dimensionSize + j] *
(h_Adata[i * dimensionSize + j] - sum));
}
}
// stop timer
gpuErrchk(hipEventRecord(stop));
gpuErrchk(hipEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(hipEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList);
// cleanup memory
gpuErrchk(hipFree(d_Adata));
gpuErrchk(hipFree(d_Ldata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
//! Run CUDA test. Normal Multiple Kernel
////////////////////////////////////////////////////////////////////////////////
float runCUDACholeskyByTheBook(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize, const int threads_per_block, const int cutoff){
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input
float* d_Adata;
gpuErrchk(hipMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(hipMemcpy(d_Adata, h_Adata, mem_size,
hipMemcpyHostToDevice));
// ... output
float* d_Ldata;
gpuErrchk(hipMalloc((void**)&d_Ldata, mem_size));
// execute the kernels
int j;
int num_blocks;
if (cutoff > 0) {
// some processing will be on host
for (j = 0; j < dimensionSize - cutoff; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookMKernelDiagonal <<< 1, 1 >>> (d_Adata, d_Ldata, dimensionSize, j);
choleskyByTheBookMKernelBelowDiagonal <<< num_blocks, threads_per_block >>> (d_Adata, d_Ldata, dimensionSize, j);
}
}
else {
// cutoff = 0, all processing will be on GPU
for (j = 0; j < dimensionSize - 1; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookMKernelDiagonal <<< 1, 1 >>> (d_Adata, d_Ldata, dimensionSize, j);
choleskyByTheBookMKernelBelowDiagonal <<< num_blocks, threads_per_block >>> (d_Adata, d_Ldata, dimensionSize, j);
}
choleskyByTheBookMKernelDiagonal <<< 1, 1 >>> (d_Adata, d_Ldata, dimensionSize, j);
}
// check if kernel execution generated an error
// copy result from device to host
gpuErrchk(hipMemcpy(h_Ldata, d_Ldata, mem_size,
hipMemcpyDeviceToHost));
// Sequential part (based on cutoff)
float sum;
int i, k;
for (j = dimensionSize - cutoff; j < dimensionSize; j++) {
// Diagonal value
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[j * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[j * dimensionSize + j] = sqrt(h_Adata[j * dimensionSize + j] - sum);
// Calculate all other rows
for (i = j + 1; i < dimensionSize; i++) { // for each row below main diagonal
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[i * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[i * dimensionSize + j] = (1.0 / h_Ldata[j * dimensionSize + j] *
(h_Adata[i * dimensionSize + j] - sum));
}
}
// stop timer
gpuErrchk(hipEventRecord(stop));
gpuErrchk(hipEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(hipEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList);
// cleanup memory
gpuErrchk(hipFree(d_Adata));
gpuErrchk(hipFree(d_Ldata));
return timerResoultList;
}
float computeSyncSingleKarnelOneBlock(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize ,const int threads_per_block) {
float* d_Adata;
hipEvent_t start, stop;
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
gpuErrchk(hipMalloc((void**)&d_Adata, mem_size));
gpuErrchk(hipMemcpy(d_Adata, h_Adata, mem_size,
hipMemcpyHostToDevice));
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start));
//Operations per thread
int num_blocks = ceil((float)(dimensionSize) / (float)dimensionSize);
//float ops_per_thread = dimensionSize / (threads_per_block * num_blocks);
dim3 thread_block(dimensionSize, 1, 1);
dim3 grid(num_blocks, 1);
if (dimensionSize == 32) {
chol_kernel_one_block<32> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
else if (dimensionSize == 64) {
chol_kernel_one_block<64> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
else if (dimensionSize == 128) {
chol_kernel_one_block<128> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
else if (dimensionSize == 256) {
chol_kernel_one_block<256> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
else if (dimensionSize == 512) {
chol_kernel_one_block<512> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
else if (dimensionSize == 1024) {
chol_kernel_one_block<1024> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
else if (dimensionSize == 2048) {
chol_kernel_one_block<2048> <<< grid, thread_block >>> (d_Adata, dimensionSize);
}
hipDeviceSynchronize();
gpuErrchk(hipEventRecord(stop));
gpuErrchk(hipEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(hipEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
// copy result from device to host
gpuErrchk(hipMemcpy(h_Ldata, d_Adata, mem_size, hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_Adata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
//! Run CUDA test. In-Place Multiple Kernel
////////////////////////////////////////////////////////////////////////////////
float runCUDACholeskyByTheBookInPlaceM(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize, const int threads_per_block, const int cutoff){
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input/output
float* d_Adata;
gpuErrchk(hipMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(hipMemcpy(d_Adata, h_Adata, mem_size,
hipMemcpyHostToDevice));
// execute the kernels
int j;
int num_blocks;
if (cutoff > 0) {
for (j = 0; j < dimensionSize - cutoff; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookInPlaceDiagonal <<< 1, 1 >>> (d_Adata, dimensionSize, j);
choleskyByTheBookInPlaceBelowDiagonal <<< num_blocks, threads_per_block >>> (d_Adata, dimensionSize, j);
}
}
else {
for (j = 0; j < dimensionSize - 1; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookInPlaceDiagonal <<< 1, 1 >>> (d_Adata, dimensionSize, j);
choleskyByTheBookInPlaceBelowDiagonal <<< num_blocks, threads_per_block >>> (d_Adata, dimensionSize, j);
}
choleskyByTheBookInPlaceDiagonal <<< 1, 1 >>> (d_Adata, dimensionSize, j);
}
// copy result from device to host
gpuErrchk(hipMemcpy(h_Ldata, d_Adata, mem_size,
hipMemcpyDeviceToHost));
// reset rest of matrix
int i;
for (i = 0; i < dimensionSize; i++) {
for (j = 0; j < i; j++) {
h_Ldata[j * dimensionSize + i] = 0;
}
}
// Sequential part (based on cutoff)
float sum;
int k;
for (j = dimensionSize - cutoff; j < dimensionSize; j++) {
// Diagonal value
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[j * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[j * dimensionSize + j] = sqrt(h_Ldata[j * dimensionSize + j] - sum);
// Calculate all other rows
for (i = j + 1; i < dimensionSize; i++) { // for each row below main diagonal
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[i * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[i * dimensionSize + j] = (1.0 / h_Ldata[j * dimensionSize + j] *
(h_Ldata[i * dimensionSize + j] - sum));
}
}
// stop timer
gpuErrchk(hipEventRecord(stop));
gpuErrchk(hipEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(hipEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList );
// cleanup memory
gpuErrchk(hipFree(d_Adata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
//! Run cuSolver test
////////////////////////////////////////////////////////////////////////////////
float runCuSolverTest(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize)
{
// initialize timer
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input
float* d_Adata;
gpuErrchk(hipMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(hipMemcpy(d_Adata, h_Adata, mem_size,
hipMemcpyHostToDevice));
// init cusolver varialbes
int work_size = 0;
int* devInfo;
gpuErrchk(hipMalloc(&devInfo, sizeof(int)));
// initialize cusolver handle
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
// initialize Spotrf
hipsolverDnSpotrf_bufferSize(solver_handle, HIPBLAS_FILL_MODE_UPPER, dimensionSize, d_Adata, dimensionSize, &work_size);
// execute Cholesky on device (potrf)
float* work;
gpuErrchk(hipMalloc(&work, work_size * sizeof(float)));
hipsolverDnSpotrf(solver_handle, HIPBLAS_FILL_MODE_UPPER, dimensionSize, d_Adata, dimensionSize, work, work_size, devInfo);
int devInfo_h = 0;
gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost));
if (devInfo_h != 0) {
printf("Unsuccessful potrf execution\n\n");
}
// copy result from device to host (copy to output)
gpuErrchk(hipMemcpy(h_Ldata, d_Adata, mem_size,
hipMemcpyDeviceToHost));
// reset rest of matrix
int i, j;
for (i = 0; i < dimensionSize; i++) {
for (j = 0; j < i; j++) {
h_Ldata[j * dimensionSize + i] = 0;
}
}
// destroy cuSolver handle
hipsolverDnDestroy(solver_handle);
// stop timer
gpuErrchk(hipEventRecord(stop));
gpuErrchk(hipEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(hipEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList );
// cleanup memory
gpuErrchk(hipFree(d_Adata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
// matrix helper functions
float* spd_create_symetricf(unsigned int dimension, float minValue, float maxValue)
{
float* m = (float*)calloc(dimension * dimension, sizeof(float));
unsigned int i, j;
for (i = 0; i < dimension; i++) {
for (j = 0; j <= i; j++) {
m[i * dimension + j] = spd_random_float(minValue, maxValue);
m[j * dimension + i] = m[i * dimension + j];
}
}
return m;
}
float* spd_make_positive_definitef(float* A, unsigned int dimension, float offset)
{
unsigned int i;
for (i = 0; i < dimension; i++) // A = A + n*I(n);
A[i * dimension + i] = A[i * dimension + i] + offset;
return A;
}
float* spd_create_blankf(unsigned int dimension)
{
float* m = (float*)calloc(dimension * dimension, sizeof(float));
unsigned int i;
for (i = 0; i < dimension * dimension; i++)
m[i] = 0;
return m;
}
float spd_random_float(float fMin, float fMax)
{
float f = (float)rand() / RAND_MAX;
return fMin + f * (fMax - fMin);
}
void spd_print_matrixf(float* A, unsigned int dimension, int count)
{
unsigned int i, j;
if (dimension < count)
count = dimension;
for (i = 0; i < count; i++)
{
for (j = 0; j < count; j++)
{
printf("%0.2f\t", A[i * dimension + j]);
}
printf("\n");
}
}
int spd_compare_matricesf(float* A, float* B, int dimension, float epsilon)
{
int correct = 1;
int errors = 0;
int i, j;
for (i = 0; i < dimension; i++) {
for (j = 0; j < dimension; j++) {
if (fabs(A[i * dimension + j] - B[i * dimension + j]) > epsilon) {
if (correct)
printf(" (%d, %d): %0.5f != %0.5f\n", i, j, A[i * dimension + j], B[i * dimension + j]);
correct = 0;
errors++;
}
}
}
printf(" Total errors: %d\n", errors);
return errors;
}
void spd_free_matrixf(float* A)
{
free(A);
}
double computeGold(float* A, float* L, const unsigned int dimensionSize)
{
clock_t start, end;
double cpu_time_used;
double total_sum = 0;
start = clock();
int i, j, k;
float sum;
for (j = 0; j < dimensionSize; j++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[j * dimensionSize + k] * L[j * dimensionSize + k];
}
L[j * dimensionSize + j] = sqrt(A[j * dimensionSize + j] - sum);
for (i = j + 1; i < dimensionSize; i++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[i * dimensionSize + k] * L[j * dimensionSize + k];
}
L[i * dimensionSize + j] = (1.0 / L[j * dimensionSize + j] *
(A[i * dimensionSize + j] - sum));
}
}
end = clock();
cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
printf("%0.8f\n", cpu_time_used);
return cpu_time_used;
}
| fb11cb69183d14f9d661ee992425d74bc6b7d9fe.cu |
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <math.h>
#include <cusolverDn.h>
#include <cublas_v2.h>
#include <cuda_runtime_api.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Thread block size
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double computeGold(float* reference, float* idata, const unsigned int len);
#define DEFAULT_MATRIX_SIZE 1024
#define DEFAULT_THREADS_PER_BLOCK 128
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runSequentialTest(float* A, float* L, const unsigned int dimensionSize);
float runCUDATest_NormalS(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff);
float runCUDACholeskyByTheBook(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff);
float runCUDACholeskyByTheBookInPlaceM(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff);
float runCuSolverTest(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize);
void writeResultToFile(char* name, float* contentGPU, double* contentCPU, int LIST_SIZE);
float computeSyncSingleKarnelOneBlock(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize, const int threads_per_block);
////////////////////////////////////////////////////////////////////////////////
// matrix helper functions
float* spd_create_symetricf(unsigned int dimension, float minValue, float maxValue);
float* spd_make_positive_definitef(float* A, unsigned int dimension, float offset);
float* spd_create_blankf(unsigned int dimension);
float spd_random_float(float fMin, float fMax);
void spd_print_matrixf(float* A, unsigned int dimension, int count);
int spd_compare_matricesf(float* A, float* B, int dimension, float epsilon);
void spd_free_matrixf(float* A);
float* transpose(float* h_Adata, int dimensionSize);
////////////////////////////////////////////////////////////////////////////////
//! Cholesky Kernel for a single column. Normal Single Kernel
//! @param A input data in global memory
//! @param L output data in global memory
//! @param dimensionSize width of matrices
//! @param col current column
////////////////////////////////////////////////////////////////////////////////
__global__ void choleskyByTheBookSingleKarnel(float* A, float* L, int dimensionSize, int col){
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = col + tid;
int k;
float sum = 0;
float value;
float sum_d = 0;
if (tid == 0) {
// diagonal
for (k = 0; k < col; k++) {
sum_d += L[col * dimensionSize + k] * L[col * dimensionSize + k];
}
L[col * dimensionSize + col] = sqrtf(A[col * dimensionSize + col] - sum_d);
}
else {
// other elements
if (row < dimensionSize) {
for (k = 0; k < col; k++) {
sum += L[row * dimensionSize + k] * L[col * dimensionSize + k];
sum_d += L[col * dimensionSize + k] * L[col * dimensionSize + k];
}
value = sqrt(A[col * dimensionSize + col] - sum_d);
L[row * dimensionSize + col] = (1.0 / value * (A[row * dimensionSize + col] - sum));
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Cholesky Kernels for a single column. Normal Multiple Kernel
//! @param A input data in global memory
//! @param L output data in global memory
//! @param dimensionSize width of matrices
//! @param col current column
////////////////////////////////////////////////////////////////////////////////
__global__ void
choleskyByTheBookMKernelDiagonal(float* A, float* L, int dimensionSize, int col)
{
int k;
float sum_d = 0;
// diagonal
for (k = 0; k < col; k++) {
sum_d += L[col * dimensionSize + k] * L[col * dimensionSize + k];
}
L[col * dimensionSize + col] = sqrtf(A[col * dimensionSize + col] - sum_d);
}
__global__ void
choleskyByTheBookMKernelBelowDiagonal(float* A, float* L, int dimensionSize, int col)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = col + tid + 1;
int k;
float sum = 0;
// other elements
if (row < dimensionSize) {
for (k = 0; k < col; k++) {
sum += L[row * dimensionSize + k] * L[col * dimensionSize + k];
}
L[row * dimensionSize + col] = (1.0 / L[col * dimensionSize + col] *
(A[row * dimensionSize + col] - sum));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Cholesky Kernels for a single column. In-Place Multiple Kernel
//! @param A input/output data in global memory
//! @param dimensionSize width of matrices
//! @param col current column
////////////////////////////////////////////////////////////////////////////////
__global__ void
choleskyByTheBookInPlaceDiagonal(float* A, int dimensionSize, int col)
{
int k;
float sum_d = 0;
// diagonal
for (k = 0; k < col; k++) {
sum_d += A[col * dimensionSize + k] * A[col * dimensionSize + k];
}
A[col * dimensionSize + col] = sqrtf(A[col * dimensionSize + col] - sum_d);
}
__global__ void choleskyByTheBookInPlaceBelowDiagonal(float* A, int dimensionSize, int col)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = col + tid + 1;
int k;
float sum = 0;
// other elements
if (row < dimensionSize) {
for (k = 0; k < col; k++) {
sum += A[row * dimensionSize + k] * A[col * dimensionSize + k];
}
A[row * dimensionSize + col] = (1.0 / A[col * dimensionSize + col] *
(A[row * dimensionSize + col] - sum));
}
}
template <int BLOCK_SIZE> __global__ void chol_kernel_one_block(float* U, unsigned int num_rows)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int tx = tid + bid ;
unsigned int i, j, k;
for (k = 0; k < num_rows; k++)
{
if (tx == 0)
{
U[k * num_rows + k] = sqrt(U[k * num_rows + k]);
for (j = (k + 1); j < num_rows; j++)
{
U[k * num_rows + j] /= U[k * num_rows + k];
}
}
__syncthreads();
for (i = (k + 1) + bid + tid; i < num_rows; i += BLOCK_SIZE )
{
for (j = i; j < num_rows; j++)
{
U[i * num_rows + j] -= U[k * num_rows + i] * U[k * num_rows + j];
}
}
__syncthreads();
}
__syncthreads();
for (i = bid + tid; i < num_rows; i += BLOCK_SIZE )
{
for (j = 0; j < i; j++)
U[i * num_rows + j] = 0.0;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int LIST_SIZE = 8;
char* name = (char*)malloc(20 * sizeof(char));
// Read from command line: size, algorithm (Sequential or CUDA), device
float* timersGPU = (float*)malloc(LIST_SIZE * sizeof(float));
double* timersCPU = (double*)malloc(LIST_SIZE * sizeof(double));
unsigned int algorithm = (argc >= 3) ?
atoi(argv[2]) :
0;
unsigned int threads_per_block = (argc >= 4) ?
atoi(argv[3]) :
DEFAULT_THREADS_PER_BLOCK;
unsigned int cutoff = (argc >= 5) ?
atoi(argv[4]) :
0;
unsigned int deviceId = (argc >= 6) ?
atoi(argv[5]) :
0;
// consistency of inputs
if ((algorithm == 0 || algorithm == 4) &&
(threads_per_block != DEFAULT_THREADS_PER_BLOCK || cutoff != 0))
return 0;
// check if tpb and max blocks are compatible with device
for (unsigned int index = 0, dimensionSize = 32; dimensionSize <= 1024; index++, dimensionSize *= 2) {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, deviceId);
if (threads_per_block > devProp.maxThreadsPerBlock ||
(ceil((float)(dimensionSize) / (float)threads_per_block) > devProp.maxThreadsDim[0]))
return 0;
if (cutoff >= dimensionSize)
return 0; // if cutoff is greater or equals than the input size, cancel execution
// allocate and initialize host memory for input and output
float* h_Adata = spd_create_symetricf(dimensionSize, 1, 100);
spd_make_positive_definitef(h_Adata, dimensionSize, 50000);
float* h_Ldata = spd_create_blankf(dimensionSize);
// run test, depending on algorithm
switch (algorithm) {
// Sequential
case 0:
name = "Sequential";
printf("%d,Sequential,", dimensionSize);
runSequentialTest(h_Adata, h_Ldata, dimensionSize);
break;
// CUDA Normal Single Kernel
case 1:
name = "SingleKarnel";
printf("%d,CUDA_NormalS,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = runCUDATest_NormalS(h_Adata, h_Ldata, dimensionSize, threads_per_block, cutoff);
break;
// CUDA Normal Multiple Kernels
case 2:
name = "MultiKarnel";
printf("%d,CUDA_NormalM,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = runCUDACholeskyByTheBook(h_Adata, h_Ldata, dimensionSize, threads_per_block, cutoff);
break;
// CUDA InPlace Multiple Kernels
case 3:
name = "InPlaceMultiKarnel";
printf("%d,CUDA_InPlaceM,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = runCUDACholeskyByTheBookInPlaceM(h_Adata, h_Ldata, dimensionSize, threads_per_block, cutoff);
break;
// CuSolver
case 4:
name = "CUSOLVER";
printf("%d,CUSOLVER,", dimensionSize);
timersGPU[index] = runCuSolverTest(h_Adata, h_Ldata, dimensionSize);
break;
case 5:
name = "SyncChols";
printf("%d,SyncChols,%d,%d,", dimensionSize, threads_per_block, cutoff);
timersGPU[index] = computeSyncSingleKarnelOneBlock(h_Adata, h_Ldata, dimensionSize, threads_per_block);
h_Ldata = transpose(h_Ldata, dimensionSize);
break;
}
// compute reference solution
float* h_LGdata = spd_create_blankf(dimensionSize);
timersCPU[index] = computeGold(h_Adata, h_LGdata, dimensionSize);
printf("Input Matrix:\n");
spd_print_matrixf(h_Adata, dimensionSize, 16);
printf("Gold Matrix:\n");
spd_print_matrixf(h_LGdata, dimensionSize, 16);
printf("GPU Output Matrix:\n");
spd_print_matrixf(h_Ldata, dimensionSize, 16);
printf("Comparing ... ");
spd_compare_matricesf(h_Ldata, h_LGdata, dimensionSize, 0.001);
spd_free_matrixf(h_LGdata);
// free matrices
spd_free_matrixf(h_Adata);
spd_free_matrixf(h_Ldata);
}
writeResultToFile(name, timersGPU, timersCPU, LIST_SIZE);
// exit
exit(EXIT_SUCCESS);
}
void writeResultToFile(char* name, float* contentGPU, double* contentCPU,int LIST_SIZE) {
FILE* f = fopen(name, "a");
for (int i = 0; i < LIST_SIZE; i++) {
fprintf(f, "%f, %0.8f \n", contentGPU[i], contentCPU[i]);
}
fprintf(f, "\n");
fclose(f);
}
float* transpose(float* h_Adata,int dimensionSize) {
float* elements = (float*)malloc(dimensionSize * dimensionSize * sizeof(float));
for (int i = 0; i < dimensionSize; i++)
for (int j = 0; j < dimensionSize; j++)
elements[i * dimensionSize + j] = h_Adata[j * dimensionSize + i];
spd_free_matrixf(h_Adata);
return elements;
}
////////////////////////////////////////////////////////////////////////////////
//! Run Sequential test
////////////////////////////////////////////////////////////////////////////////
void runSequentialTest(float* A, float* L, const unsigned int dimensionSize)
{
// initialize timer
clock_t start, end;
double cpu_time_used;
double total_sum = 0;
start = clock();
int i, j, k;
float sum;
for (j = 0; j < dimensionSize; j++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[j * dimensionSize + k] * L[j * dimensionSize + k];
}
L[j * dimensionSize + j] = sqrt(A[j * dimensionSize + j] - sum);
for (i = j + 1; i < dimensionSize; i++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[i * dimensionSize + k] * L[j * dimensionSize + k];
}
L[i * dimensionSize + j] = (1.0 / L[j * dimensionSize + j] *
(A[i * dimensionSize + j] - sum));
}
}
// stop timer
end = clock();
cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
printf("%0.8f\n", cpu_time_used);
}
////////////////////////////////////////////////////////////////////////////////
//! Run CUDA test. Normal Single Kernel
////////////////////////////////////////////////////////////////////////////////
float runCUDATest_NormalS(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize,
const int threads_per_block, const int cutoff)
{
// set device id
// initialize timer
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input
float* d_Adata;
gpuErrchk(cudaMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(cudaMemcpy(d_Adata, h_Adata, mem_size,
cudaMemcpyHostToDevice));
// ... output
float* d_Ldata;
gpuErrchk(cudaMalloc((void**)&d_Ldata, mem_size));
// execute the kernels
int j;
int num_blocks;
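// One launch per column j: the kernel factorizes column j (diagonal element plus the rows below it);
// the trailing `cutoff` columns are finished sequentially on the host further down.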
for (j = 0; j < dimensionSize - cutoff; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookSingleKarnel<<<num_blocks, threads_per_block>>>(d_Adata, d_Ldata, dimensionSize, j);
}
// check if kernel execution generated an error
// copy result from device to host
gpuErrchk(cudaMemcpy(h_Ldata, d_Ldata, mem_size,
cudaMemcpyDeviceToHost));
// Sequential part (based on cutoff)
float sum;
int i, k;
for (j = dimensionSize - cutoff; j < dimensionSize; j++) {
// Diagonal value
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[j * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[j * dimensionSize + j] = sqrt(h_Adata[j * dimensionSize + j] - sum);
// Calculate all other rows
for (i = j + 1; i < dimensionSize; i++) { // for each row below main diagonal
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[i * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[i * dimensionSize + j] = (1.0 / h_Ldata[j * dimensionSize + j] *
(h_Adata[i * dimensionSize + j] - sum));
}
}
// stop timer
gpuErrchk(cudaEventRecord(stop));
gpuErrchk(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList);
// cleanup memory
gpuErrchk(cudaFree(d_Adata));
gpuErrchk(cudaFree(d_Ldata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
//! Run CUDA test. Normal Multiple Kernel
////////////////////////////////////////////////////////////////////////////////
float runCUDACholeskyByTheBook(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize, const int threads_per_block, const int cutoff){
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input
float* d_Adata;
gpuErrchk(cudaMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(cudaMemcpy(d_Adata, h_Adata, mem_size,
cudaMemcpyHostToDevice));
// ... output
float* d_Ldata;
gpuErrchk(cudaMalloc((void**)&d_Ldata, mem_size));
// execute the kernels
int j;
int num_blocks;
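// Two launches per column: a single-thread kernel for the diagonal element, then a parallel kernel
// for the elements below it; with cutoff == 0 the last diagonal element needs one extra diagonal launch after the loop.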
if (cutoff > 0) {
// some processing will be on host
for (j = 0; j < dimensionSize - cutoff; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookMKernelDiagonal<<<1, 1>>>(d_Adata, d_Ldata, dimensionSize, j);
choleskyByTheBookMKernelBelowDiagonal<<<num_blocks, threads_per_block>>>(d_Adata, d_Ldata, dimensionSize, j);
}
}
else {
// cutoff = 0, all processing will be on GPU
for (j = 0; j < dimensionSize - 1; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookMKernelDiagonal<<<1, 1>>>(d_Adata, d_Ldata, dimensionSize, j);
choleskyByTheBookMKernelBelowDiagonal<<<num_blocks, threads_per_block>>>(d_Adata, d_Ldata, dimensionSize, j);
}
choleskyByTheBookMKernelDiagonal<<<1, 1>>>(d_Adata, d_Ldata, dimensionSize, j);
}
// check if kernel execution generated an error
// copy result from device to host
gpuErrchk(cudaMemcpy(h_Ldata, d_Ldata, mem_size,
cudaMemcpyDeviceToHost));
// Sequential part (based on cutoff)
float sum;
int i, k;
for (j = dimensionSize - cutoff; j < dimensionSize; j++) {
// Diagonal value
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[j * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[j * dimensionSize + j] = sqrt(h_Adata[j * dimensionSize + j] - sum);
// Calculate all other rows
for (i = j + 1; i < dimensionSize; i++) { // for each row below main diagonal
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[i * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[i * dimensionSize + j] = (1.0 / h_Ldata[j * dimensionSize + j] *
(h_Adata[i * dimensionSize + j] - sum));
}
}
// stop timer
gpuErrchk(cudaEventRecord(stop));
gpuErrchk(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList);
// cleanup memory
gpuErrchk(cudaFree(d_Adata));
gpuErrchk(cudaFree(d_Ldata));
return timerResoultList;
}
float computeSyncSingleKarnelOneBlock(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize ,const int threads_per_block) {
float* d_Adata;
cudaEvent_t start, stop;
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
gpuErrchk(cudaMalloc((void**)&d_Adata, mem_size));
gpuErrchk(cudaMemcpy(d_Adata, h_Adata, mem_size,
cudaMemcpyHostToDevice));
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start));
//Operations per thread
int num_blocks = ceil((float)(dimensionSize) / (float)dimensionSize);
//float ops_per_thread = dimensionSize / (threads_per_block * num_blocks);
dim3 thread_block(dimensionSize, 1, 1);
dim3 grid(num_blocks, 1);
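// num_blocks always evaluates to 1 here: the whole factorization runs in a single block whose thread
// count equals the matrix dimension, which is why only the power-of-two sizes below are dispatched
// (note that sizes above 1024 exceed the usual per-block thread limit).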
if (dimensionSize == 32) {
chol_kernel_one_block<32><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
else if (dimensionSize == 64) {
chol_kernel_one_block<64><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
else if (dimensionSize == 128) {
chol_kernel_one_block<128><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
else if (dimensionSize == 256) {
chol_kernel_one_block<256><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
else if (dimensionSize == 512) {
chol_kernel_one_block<512><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
else if (dimensionSize == 1024) {
chol_kernel_one_block<1024><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
else if (dimensionSize == 2048) {
chol_kernel_one_block<2048><<<grid, thread_block>>>(d_Adata, dimensionSize);
}
cudaDeviceSynchronize();
gpuErrchk(cudaEventRecord(stop));
gpuErrchk(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
// copy result from device to host
gpuErrchk(cudaMemcpy(h_Ldata, d_Adata, mem_size, cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_Adata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
//! Run CUDA test. In-Place Multiple Kernel
////////////////////////////////////////////////////////////////////////////////
float runCUDACholeskyByTheBookInPlaceM(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize, const int threads_per_block, const int cutoff){
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input/output
float* d_Adata;
gpuErrchk(cudaMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(cudaMemcpy(d_Adata, h_Adata, mem_size,
cudaMemcpyHostToDevice));
// execute the kernels
int j;
int num_blocks;
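// Same column-by-column scheme as above, but done in place on d_Adata; the upper triangle is zeroed
// on the host after the copy-back and the last `cutoff` columns are finished sequentially.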
if (cutoff > 0) {
for (j = 0; j < dimensionSize - cutoff; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookInPlaceDiagonal<<<1, 1>>>(d_Adata, dimensionSize, j);
choleskyByTheBookInPlaceBelowDiagonal<<<num_blocks, threads_per_block>>>(d_Adata, dimensionSize, j);
}
}
else {
for (j = 0; j < dimensionSize - 1; j++) {
num_blocks = ceil((float)(dimensionSize - j) / (float)threads_per_block);
choleskyByTheBookInPlaceDiagonal<<<1, 1>>>(d_Adata, dimensionSize, j);
choleskyByTheBookInPlaceBelowDiagonal<<<num_blocks, threads_per_block>>>(d_Adata, dimensionSize, j);
}
choleskyByTheBookInPlaceDiagonal<<<1, 1>>>(d_Adata, dimensionSize, j);
}
// copy result from device to host
gpuErrchk(cudaMemcpy(h_Ldata, d_Adata, mem_size,
cudaMemcpyDeviceToHost));
// reset rest of matrix
int i;
for (i = 0; i < dimensionSize; i++) {
for (j = 0; j < i; j++) {
h_Ldata[j * dimensionSize + i] = 0;
}
}
// Sequential part (based on cutoff)
float sum;
int k;
for (j = dimensionSize - cutoff; j < dimensionSize; j++) {
// Diagonal value
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[j * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[j * dimensionSize + j] = sqrt(h_Ldata[j * dimensionSize + j] - sum);
// Calculate all other rows
for (i = j + 1; i < dimensionSize; i++) { // for each row below main diagonal
sum = 0;
for (k = 0; k < j; k++) {
sum += h_Ldata[i * dimensionSize + k] * h_Ldata[j * dimensionSize + k];
}
h_Ldata[i * dimensionSize + j] = (1.0 / h_Ldata[j * dimensionSize + j] *
(h_Ldata[i * dimensionSize + j] - sum));
}
}
// stop timer
gpuErrchk(cudaEventRecord(stop));
gpuErrchk(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList );
// cleanup memory
gpuErrchk(cudaFree(d_Adata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
//! Run cuSolver test
////////////////////////////////////////////////////////////////////////////////
float runCuSolverTest(float* h_Adata, float* h_Ldata, const unsigned int dimensionSize)
{
// initialize timer
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start));
// allocate device memory ...
unsigned int mem_size = sizeof(float) * dimensionSize * dimensionSize;
// ... input
float* d_Adata;
gpuErrchk(cudaMalloc((void**)&d_Adata, mem_size));
// copy host memory to device
gpuErrchk(cudaMemcpy(d_Adata, h_Adata, mem_size,
cudaMemcpyHostToDevice));
// initialize cuSolver variables
int work_size = 0;
int* devInfo;
gpuErrchk(cudaMalloc(&devInfo, sizeof(int)));
// initialize cusolver handle
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
// initialize Spotrf
cusolverDnSpotrf_bufferSize(solver_handle, CUBLAS_FILL_MODE_UPPER, dimensionSize, d_Adata, dimensionSize, &work_size);
// execute Cholesky on device (potrf)
float* work;
gpuErrchk(cudaMalloc(&work, work_size * sizeof(float)));
cusolverDnSpotrf(solver_handle, CUBLAS_FILL_MODE_UPPER, dimensionSize, d_Adata, dimensionSize, work, work_size, devInfo);
int devInfo_h = 0;
gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
if (devInfo_h != 0) {
printf("Unsuccessful potrf execution\n\n");
}
// copy result from device to host (copy to output)
gpuErrchk(cudaMemcpy(h_Ldata, d_Adata, mem_size,
cudaMemcpyDeviceToHost));
// reset rest of matrix
int i, j;
for (i = 0; i < dimensionSize; i++) {
for (j = 0; j < i; j++) {
h_Ldata[j * dimensionSize + i] = 0;
}
}
// destroy cuSolver handle
cusolverDnDestroy(solver_handle);
// stop timer
gpuErrchk(cudaEventRecord(stop));
gpuErrchk(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop));
float timerResoultList = msecTotal / 1000;
printf("%0.8f\n", timerResoultList );
// cleanup memory
gpuErrchk(cudaFree(d_Adata));
return timerResoultList;
}
////////////////////////////////////////////////////////////////////////////////
// matrix helper functions
float* spd_create_symetricf(unsigned int dimension, float minValue, float maxValue)
{
float* m = (float*)calloc(dimension * dimension, sizeof(float));
unsigned int i, j;
for (i = 0; i < dimension; i++) {
for (j = 0; j <= i; j++) {
m[i * dimension + j] = spd_random_float(minValue, maxValue);
m[j * dimension + i] = m[i * dimension + j];
}
}
return m;
}
float* spd_make_positive_definitef(float* A, unsigned int dimension, float offset)
{
unsigned int i;
for (i = 0; i < dimension; i++) // A = A + n*I(n);
A[i * dimension + i] = A[i * dimension + i] + offset;
return A;
}
float* spd_create_blankf(unsigned int dimension)
{
float* m = (float*)calloc(dimension * dimension, sizeof(float));
unsigned int i;
for (i = 0; i < dimension * dimension; i++)
m[i] = 0;
return m;
}
float spd_random_float(float fMin, float fMax)
{
float f = (float)rand() / RAND_MAX;
return fMin + f * (fMax - fMin);
}
void spd_print_matrixf(float* A, unsigned int dimension, int count)
{
unsigned int i, j;
if (dimension < count)
count = dimension;
for (i = 0; i < count; i++)
{
for (j = 0; j < count; j++)
{
printf("%0.2f\t", A[i * dimension + j]);
}
printf("\n");
}
}
int spd_compare_matricesf(float* A, float* B, int dimension, float epsilon)
{
int correct = 1;
int errors = 0;
int i, j;
for (i = 0; i < dimension; i++) {
for (j = 0; j < dimension; j++) {
if (fabs(A[i * dimension + j] - B[i * dimension + j]) > epsilon) {
if (correct)
printf(" (%d, %d): %0.5f != %0.5f\n", i, j, A[i * dimension + j], B[i * dimension + j]);
correct = 0;
errors++;
}
}
}
printf(" Total errors: %d\n", errors);
return errors;
}
void spd_free_matrixf(float* A)
{
free(A);
}
double computeGold(float* A, float* L, const unsigned int dimensionSize)
{
clock_t start, end;
double cpu_time_used;
double total_sum = 0;
start = clock();
int i, j, k;
float sum;
for (j = 0; j < dimensionSize; j++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[j * dimensionSize + k] * L[j * dimensionSize + k];
}
L[j * dimensionSize + j] = sqrt(A[j * dimensionSize + j] - sum);
for (i = j + 1; i < dimensionSize; i++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += L[i * dimensionSize + k] * L[j * dimensionSize + k];
}
L[i * dimensionSize + j] = (1.0 / L[j * dimensionSize + j] *
(A[i * dimensionSize + j] - sum));
}
}
end = clock();
cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
printf("%0.8f\n", cpu_time_used);
return cpu_time_used;
}
|
c2c184290832ddc48102e50c0050a35143c22670.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "svgf/svgf.h"
#include "kernel/context.cuh"
#include "kernel/light.cuh"
#include "kernel/material.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/StreamCompaction.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
__global__ void fillAOV(
idaten::TileDomain tileDomain,
hipSurfaceObject_t dst,
idaten::SVGFPathTracing::AOVMode mode,
int width, int height,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
hipSurfaceObject_t motionDetphBuffer,
const aten::CameraParameter* __restrict__ camera,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
hipTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
hipTextureObject_t vtxPos,
const aten::mat4* __restrict__ matrices)
{
auto ix = blockIdx.x * blockDim.x + threadIdx.x;
auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= tileDomain.w || iy >= tileDomain.h) {
return;
}
const aten::vec3 colors[] = {
aten::vec3(255, 0, 0),
aten::vec3( 0, 255, 0),
aten::vec3( 0, 0, 255),
aten::vec3(255, 255, 0),
aten::vec3(255, 0, 255),
aten::vec3( 0, 255, 255),
aten::vec3(128, 128, 128),
aten::vec3( 86, 99, 143),
aten::vec3( 71, 234, 126),
aten::vec3(124, 83, 53),
};
ix += tileDomain.x;
iy += tileDomain.y;
const auto idx = getIdx(ix, iy, width);
float s = (ix + 0.5f) / (float)(width);
float t = (iy + 0.5f) / (float)(height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, camera, s, t);
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
float4 clr = make_float4(1);
if (mode == idaten::SVGFPathTracing::AOVMode::Normal) {
auto n = aovNormalDepth[idx] * 0.5f + 0.5f;
clr = make_float4(n.x, n.y, n.z, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Depth) {
// TODO
}
else if (mode == idaten::SVGFPathTracing::AOVMode::TexColor) {
clr = aovTexclrMeshid[idx];
}
else if (mode == idaten::SVGFPathTracing::AOVMode::WireFrame) {
bool isHitEdge = (isect.a < 1e-2) || (isect.b < 1e-2) || (1 - isect.a - isect.b < 1e-2);
clr = isHitEdge ? make_float4(0) : make_float4(1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::BaryCentric) {
auto c = 1 - isect.a - isect.b;
clr = make_float4(isect.a, isect.b, c, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Motion) {
float4 data;
surf2Dread(&data, motionDetphBuffer, ix * sizeof(float4), iy);
// TODO
float motionX = data.x;
float motionY = data.y;
clr = make_float4(motionX, motionY, 0, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::ObjId) {
#if 0
int objid = isect.meshid;
#else
int objid = isect.mtrlid;
#endif
if (objid >= 0) {
objid %= AT_COUNTOF(colors);
auto c = colors[objid];
clr = make_float4(c.x, c.y, c.z, 1);
clr /= 255.0f;
}
else {
clr = make_float4(0, 0, 0, 1);
}
}
surf2Dwrite(
clr,
dst,
ix * sizeof(float4), iy,
hipBoundaryModeTrap);
}
__global__ void pickPixel(
idaten::SVGFPathTracing::PickedInfo* dst,
int ix, int iy,
int width, int height,
const aten::CameraParameter* __restrict__ camera,
const idaten::SVGFPathTracing::Path* __restrict__ paths,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
hipTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
hipTextureObject_t vtxPos,
aten::mat4* matrices)
{
iy = height - 1 - iy;
float s = (ix + 0.5f) / (float)(camera->width);
float t = (iy + 0.5f) / (float)(camera->height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, camera, s, t);
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
if (isHit) {
const auto idx = getIdx(ix, iy, width);
auto normalDepth = aovNormalDepth[idx];
auto texclrMeshid = aovTexclrMeshid[idx];
dst->ix = ix;
dst->iy = iy;
dst->color = aten::vec3(paths->contrib[idx].contrib.x, paths->contrib[idx].contrib.y, paths->contrib[idx].contrib.z);
dst->normal = aten::vec3(normalDepth.x, normalDepth.y, normalDepth.z);
dst->depth = normalDepth.w;
dst->meshid = (int)texclrMeshid.w;
dst->triid = isect.primid;
dst->mtrlid = isect.mtrlid;
}
else {
dst->ix = -1;
dst->iy = -1;
}
}
namespace idaten
{
void SVGFPathTracing::onDisplayAOV(
hipSurfaceObject_t outputSurf,
int width, int height,
hipTextureObject_t texVtxPos)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(m_tileDomain.w + block.x - 1) / block.x,
(m_tileDomain.h + block.y - 1) / block.y);
int curaov = getCurAovs();
CudaGLResourceMapper rscmap(&m_motionDepthBuffer);
auto gbuffer = m_motionDepthBuffer.bind();
fillAOV<<<grid, block>>>(
m_tileDomain,
outputSurf,
m_aovMode,
width, height,
m_aovNormalDepth[curaov].ptr(),
m_aovTexclrMeshid[curaov].ptr(),
gbuffer,
m_cam.ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
}
void SVGFPathTracing::pick(
int ix, int iy,
int width, int height,
hipTextureObject_t texVtxPos)
{
if (m_willPicklPixel) {
m_pick.init(1);
int curaov = getCurAovs();
pickPixel<<<1, 1>>>(
m_pick.ptr(),
m_pickedInfo.ix, m_pickedInfo.iy,
width, height,
m_cam.ptr(),
m_paths.ptr(),
m_aovNormalDepth[curaov].ptr(),
m_aovTexclrMeshid[curaov].ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
m_pick.readByNum(&m_pickedInfo);
m_willPicklPixel = false;
}
}
} | c2c184290832ddc48102e50c0050a35143c22670.cu | #include "svgf/svgf.h"
#include "kernel/context.cuh"
#include "kernel/light.cuh"
#include "kernel/material.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/StreamCompaction.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
__global__ void fillAOV(
idaten::TileDomain tileDomain,
cudaSurfaceObject_t dst,
idaten::SVGFPathTracing::AOVMode mode,
int width, int height,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
cudaSurfaceObject_t motionDetphBuffer,
const aten::CameraParameter* __restrict__ camera,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
cudaTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
cudaTextureObject_t vtxPos,
const aten::mat4* __restrict__ matrices)
{
auto ix = blockIdx.x * blockDim.x + threadIdx.x;
auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= tileDomain.w || iy >= tileDomain.h) {
return;
}
const aten::vec3 colors[] = {
aten::vec3(255, 0, 0),
aten::vec3( 0, 255, 0),
aten::vec3( 0, 0, 255),
aten::vec3(255, 255, 0),
aten::vec3(255, 0, 255),
aten::vec3( 0, 255, 255),
aten::vec3(128, 128, 128),
aten::vec3( 86, 99, 143),
aten::vec3( 71, 234, 126),
aten::vec3(124, 83, 53),
};
ix += tileDomain.x;
iy += tileDomain.y;
const auto idx = getIdx(ix, iy, width);
float s = (ix + 0.5f) / (float)(width);
float t = (iy + 0.5f) / (float)(height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, camera, s, t);
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
float4 clr = make_float4(1);
if (mode == idaten::SVGFPathTracing::AOVMode::Normal) {
auto n = aovNormalDepth[idx] * 0.5f + 0.5f;
clr = make_float4(n.x, n.y, n.z, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Depth) {
// TODO
}
else if (mode == idaten::SVGFPathTracing::AOVMode::TexColor) {
clr = aovTexclrMeshid[idx];
}
else if (mode == idaten::SVGFPathTracing::AOVMode::WireFrame) {
bool isHitEdge = (isect.a < 1e-2) || (isect.b < 1e-2) || (1 - isect.a - isect.b < 1e-2);
clr = isHitEdge ? make_float4(0) : make_float4(1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::BaryCentric) {
auto c = 1 - isect.a - isect.b;
clr = make_float4(isect.a, isect.b, c, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Motion) {
float4 data;
surf2Dread(&data, motionDetphBuffer, ix * sizeof(float4), iy);
// TODO
float motionX = data.x;
float motionY = data.y;
clr = make_float4(motionX, motionY, 0, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::ObjId) {
#if 0
int objid = isect.meshid;
#else
int objid = isect.mtrlid;
#endif
if (objid >= 0) {
objid %= AT_COUNTOF(colors);
auto c = colors[objid];
clr = make_float4(c.x, c.y, c.z, 1);
clr /= 255.0f;
}
else {
clr = make_float4(0, 0, 0, 1);
}
}
surf2Dwrite(
clr,
dst,
ix * sizeof(float4), iy,
cudaBoundaryModeTrap);
}
__global__ void pickPixel(
idaten::SVGFPathTracing::PickedInfo* dst,
int ix, int iy,
int width, int height,
const aten::CameraParameter* __restrict__ camera,
const idaten::SVGFPathTracing::Path* __restrict__ paths,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
cudaTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
cudaTextureObject_t vtxPos,
aten::mat4* matrices)
{
iy = height - 1 - iy;
float s = (ix + 0.5f) / (float)(camera->width);
float t = (iy + 0.5f) / (float)(camera->height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, camera, s, t);
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
if (isHit) {
const auto idx = getIdx(ix, iy, width);
auto normalDepth = aovNormalDepth[idx];
auto texclrMeshid = aovTexclrMeshid[idx];
dst->ix = ix;
dst->iy = iy;
dst->color = aten::vec3(paths->contrib[idx].contrib.x, paths->contrib[idx].contrib.y, paths->contrib[idx].contrib.z);
dst->normal = aten::vec3(normalDepth.x, normalDepth.y, normalDepth.z);
dst->depth = normalDepth.w;
dst->meshid = (int)texclrMeshid.w;
dst->triid = isect.primid;
dst->mtrlid = isect.mtrlid;
}
else {
dst->ix = -1;
dst->iy = -1;
}
}
namespace idaten
{
void SVGFPathTracing::onDisplayAOV(
cudaSurfaceObject_t outputSurf,
int width, int height,
cudaTextureObject_t texVtxPos)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(m_tileDomain.w + block.x - 1) / block.x,
(m_tileDomain.h + block.y - 1) / block.y);
int curaov = getCurAovs();
CudaGLResourceMapper rscmap(&m_motionDepthBuffer);
auto gbuffer = m_motionDepthBuffer.bind();
fillAOV<<<grid, block>>>(
m_tileDomain,
outputSurf,
m_aovMode,
width, height,
m_aovNormalDepth[curaov].ptr(),
m_aovTexclrMeshid[curaov].ptr(),
gbuffer,
m_cam.ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
}
void SVGFPathTracing::pick(
int ix, int iy,
int width, int height,
cudaTextureObject_t texVtxPos)
{
if (m_willPicklPixel) {
m_pick.init(1);
int curaov = getCurAovs();
pickPixel<<<1, 1>>>(
m_pick.ptr(),
m_pickedInfo.ix, m_pickedInfo.iy,
width, height,
m_cam.ptr(),
m_paths.ptr(),
m_aovNormalDepth[curaov].ptr(),
m_aovTexclrMeshid[curaov].ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
m_pick.readByNum(&m_pickedInfo);
m_willPicklPixel = false;
}
}
} |
97c25a58deb10be4f743f6840625e45474bf26e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "jacobi.h"
#include "alloc3d_gpu.h"
#include "transfer3d_gpu.h"
#include "alloc3d.h"
#define BLOCK_SIZE 8
void
assign_ufu_old(double*** u, double*** f, double*** u_old, int N, double start_T){
int radxi = 0,
radxf = (5 * N)/16, // (-3/8 + 1) * N/2
radyi = 0,
radyf = N/4, // (-1/2 + 1) * N/2
radzi = N/6 + (N%6 > 0), // (-2/3 + 1) * N/2 truncating upwards if there's some remainder.
radzf = N/2; // (0 + 1) * N/2
int i,j,k;
for (i=0; i<N; i++){
for (j=0;j<N;j++){
for (k=0; k<N; k++){
u_old[i][j][k]=start_T;
u[i][j][k]=start_T;
f[i][j][k]=0;
}
}
}
for(i=0;i<N;i++){
for(j=0;j<N;j++){
u_old[i][0][j] = 0.;
u_old[i][N-1][j]=20.;
u[i][0][j] = 0.;
u[i][N-1][j]=20.;
u_old[i][j][0]=20.;
u_old[i][j][N-1]=20.;
u[i][j][0]=20.;
u[i][j][N-1]=20.;
u_old[0][i][j]=20.;
u_old[N-1][i][j]=20.;
u[0][i][j]=20.;
u[N-1][i][j]=20.;
}
}
// printf("X: %d - %d. Y: %d - %d. Z: %d - %d\n", radxi, radxf, radyi, radyf, radzi, radzf);
for (i = radxi; i <= radxf; i++) {
for (j = radyi; j <= radyf; j++) {
for (k = radzi; k <= radzf; k++) {
f[i][j][k] = 200;
}
}
}
}
void print_matrix(double*** A, int N){
int i,j,k;
for (i=0; i<N; i++){
printf("\n %d -th Layer \n", i);
for(j=0; j<N; j++){
for(k=0; k<N; k++){
printf("%lf \t", A[i][j][k]);
}
printf("\n");
}
}
}
int main(int argc, char *argv[]){
int N = 5;
int iter_max = 10;
double start_T, tolerance=10;
int output_type = 0;
double ***u_h = NULL;
double ***f_h = NULL;
double ***prev_u_h=NULL;
double ***u_d=NULL;
double ***f_d=NULL;
double ***prev_u_d=NULL;
/* get the paramters from the command line */
N = atoi(argv[1]); // grid size
iter_max = atoi(argv[2]); // max. no. of iterations
start_T = atof(argv[3]); // start T for all inner grid points
tolerance = atof(argv[4]);
if (argc == 6) {
output_type = atoi(argv[5]); // output type
}
//ON CPU
//allocate memory
if ( (u_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array u: allocation failed");
exit(-1);
}
//allocate f
if ( (f_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
if ( (prev_u_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
//ON GPU
if ( (u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
perror("array u: allocation failed");
exit(-1);
}
if ( (f_d = d_malloc_3d_gpu(N,N,N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
if ( (prev_u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
printf("We are going to start the Jacobi-Iteration \n");
assign_ufu_old(u_h,f_h,prev_u_h,N, start_T);
//create event in order to time
hipEvent_t start, stop;
float elapsed;
hipEventCreate(&start); hipEventCreate(&stop);
//copy assigned Matrices to GPU
transfer_3d(u_d, u_h, N,N,N, hipMemcpyHostToDevice);
transfer_3d(prev_u_d, prev_u_h, N,N,N, hipMemcpyHostToDevice);
transfer_3d(f_d,f_h, N,N,N, hipMemcpyHostToDevice);
//start for-loop
double step_width=2./(double)N;
double denominator = 1.0/(double)6;
int i = 0;
//define pointer for stopping-condition
double *norm=NULL;
double norm_h=100000000;
hipMalloc((void**)&norm, 1*sizeof(double));
double ***swap;
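// The solver ping-pongs between u_d and prev_u_d: each sweep writes one buffer while reading the other,
// and the pointers are swapped after the sweep instead of copying the grid.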
//create 3-dimensional grid
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid_size((N-2+block_size.x-1)/block_size.x, (N-2+block_size.y-1)/ block_size.y, (N-2+block_size.z-1)/block_size.z);
//start iteration
hipEventRecord(start,0);
while((i<iter_max)&&(norm_h>tolerance)){
//printf("At iteration Nr %d \n", i);
norm_h=0;
hipMemcpy(norm,&norm_h,1*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( jacobi_gpu4), dim3(grid_size),dim3(block_size), 0, 0, u_d,prev_u_d,f_d,N, step_width, denominator, norm);
hipDeviceSynchronize();
hipMemcpy(&norm_h,norm,1*sizeof(double), hipMemcpyDeviceToHost);
printf("norm_h : %lf", norm_h);
swap = u_d;
u_d = prev_u_d;
prev_u_d = swap;
i++;
}
hipEventRecord(stop,0);
//copy matrices back to CPU
transfer_3d(u_h, u_d, N,N,N, hipMemcpyDeviceToHost);
transfer_3d(prev_u_h, prev_u_d, N,N,N, hipMemcpyDeviceToHost);
//print_matrix(u_h,N);
//print_matrix(prev_u_h,N);
//stop and destroy events
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start); hipEventDestroy(stop);
printf("Time %lf \n",elapsed);
// dump results if wanted
switch(output_type) {
case 0:
// no output at all
break;
case 2:
print_matrix(prev_u_h,N);
break;
default:
fprintf(stderr, "Non-supported output type!\n");
break;
}
// de-allocate memory
free(u_h);
free(prev_u_h);
free(f_h);
free_gpu(u_d);
free_gpu(prev_u_d);
free_gpu(f_d);
return(0);
}
| 97c25a58deb10be4f743f6840625e45474bf26e4.cu | #include <stdio.h>
#include "jacobi.h"
#include "alloc3d_gpu.h"
#include "transfer3d_gpu.h"
#include "alloc3d.h"
#define BLOCK_SIZE 8
void
assign_ufu_old(double*** u, double*** f, double*** u_old, int N, double start_T){
int radxi = 0,
radxf = (5 * N)/16, // (-3/8 + 1) * N/2
radyi = 0,
radyf = N/4, // (-1/2 + 1) * N/2
radzi = N/6 + (N%6 > 0), // (-2/3 + 1) * N/2 truncating upwards if there's some remainder.
radzf = N/2; // (0 + 1) * N/2
int i,j,k;
for (i=0; i<N; i++){
for (j=0;j<N;j++){
for (k=0; k<N; k++){
u_old[i][j][k]=start_T;
u[i][j][k]=start_T;
f[i][j][k]=0;
}
}
}
for(i=0;i<N;i++){
for(j=0;j<N;j++){
u_old[i][0][j] = 0.;
u_old[i][N-1][j]=20.;
u[i][0][j] = 0.;
u[i][N-1][j]=20.;
u_old[i][j][0]=20.;
u_old[i][j][N-1]=20.;
u[i][j][0]=20.;
u[i][j][N-1]=20.;
u_old[0][i][j]=20.;
u_old[N-1][i][j]=20.;
u[0][i][j]=20.;
u[N-1][i][j]=20.;
}
}
// printf("X: %d - %d. Y: %d - %d. Z: %d - %d\n", radxi, radxf, radyi, radyf, radzi, radzf);
for (i = radxi; i <= radxf; i++) {
for (j = radyi; j <= radyf; j++) {
for (k = radzi; k <= radzf; k++) {
f[i][j][k] = 200;
}
}
}
}
void print_matrix(double*** A, int N){
int i,j,k;
for (i=0; i<N; i++){
printf("\n %d -th Layer \n", i);
for(j=0; j<N; j++){
for(k=0; k<N; k++){
printf("%lf \t", A[i][j][k]);
}
printf("\n");
}
}
}
int main(int argc, char *argv[]){
int N = 5;
int iter_max = 10;
double start_T, tolerance=10;
int output_type = 0;
double ***u_h = NULL;
double ***f_h = NULL;
double ***prev_u_h=NULL;
double ***u_d=NULL;
double ***f_d=NULL;
double ***prev_u_d=NULL;
/* get the paramters from the command line */
N = atoi(argv[1]); // grid size
iter_max = atoi(argv[2]); // max. no. of iterations
start_T = atof(argv[3]); // start T for all inner grid points
tolerance = atof(argv[4]);
if (argc == 6) {
output_type = atoi(argv[5]); // output type
}
//ON CPU
//allocate memory
if ( (u_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array u: allocation failed");
exit(-1);
}
//allocate f
if ( (f_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
if ( (prev_u_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
//ON GPU
if ( (u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
perror("array u: allocation failed");
exit(-1);
}
if ( (f_d = d_malloc_3d_gpu(N,N,N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
if ( (prev_u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
printf("We are going to start the Jacobi-Iteration \n");
assign_ufu_old(u_h,f_h,prev_u_h,N, start_T);
//create event in order to time
cudaEvent_t start, stop;
float elapsed;
cudaEventCreate(&start); cudaEventCreate(&stop);
//copy assigned Matrices to GPU
transfer_3d(u_d, u_h, N,N,N, cudaMemcpyHostToDevice);
transfer_3d(prev_u_d, prev_u_h, N,N,N, cudaMemcpyHostToDevice);
transfer_3d(f_d,f_h, N,N,N, cudaMemcpyHostToDevice);
//start for-loop
double step_width=2./(double)N;
double denominator = 1.0/(double)6;
int i = 0;
//define pointer for stopping-condition
double *norm=NULL;
double norm_h=100000000;
cudaMalloc((void**)&norm, 1*sizeof(double));
double ***swap;
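// The solver ping-pongs between u_d and prev_u_d: each sweep writes one buffer while reading the other,
// and the pointers are swapped after the sweep instead of copying the grid.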
//create 3-dimensional grid
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid_size((N-2+block_size.x-1)/block_size.x, (N-2+block_size.y-1)/ block_size.y, (N-2+block_size.z-1)/block_size.z);
//start iteration
cudaEventRecord(start,0);
while((i<iter_max)&&(norm_h>tolerance)){
//printf("At iteration Nr %d \n", i);
norm_h=0;
cudaMemcpy(norm,&norm_h,1*sizeof(double), cudaMemcpyHostToDevice);
jacobi_gpu4<<<grid_size,block_size>>>(u_d,prev_u_d,f_d,N, step_width, denominator, norm);
cudaDeviceSynchronize();
cudaMemcpy(&norm_h,norm,1*sizeof(double), cudaMemcpyDeviceToHost);
printf("norm_h : %lf", norm_h);
swap = u_d;
u_d = prev_u_d;
prev_u_d = swap;
i++;
}
cudaEventRecord(stop,0);
//copy matrices back to CPU
transfer_3d(u_h, u_d, N,N,N, cudaMemcpyDeviceToHost);
transfer_3d(prev_u_h, prev_u_d, N,N,N, cudaMemcpyDeviceToHost);
//print_matrix(u_h,N);
//print_matrix(prev_u_h,N);
//stop and destroy events
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start); cudaEventDestroy(stop);
printf("Time %lf \n",elapsed);
// dump results if wanted
switch(output_type) {
case 0:
// no output at all
break;
case 2:
print_matrix(prev_u_h,N);
break;
default:
fprintf(stderr, "Non-supported output type!\n");
break;
}
// de-allocate memory
free(u_h);
free(prev_u_h);
free(f_h);
free_gpu(u_d);
free_gpu(prev_u_d);
free_gpu(f_d);
return(0);
}
|
f4da63e4ac1c1d80a6d25c791bbc3c175713c9a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void _bcnn_dropout_layer_kernel(float *input, int size, float *rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id < size) {
input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
}
} | f4da63e4ac1c1d80a6d25c791bbc3c175713c9a3.cu | #include "includes.h"
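// Inverted-dropout style: an element is zeroed when its pre-generated random value falls below `prob`,
// otherwise it is rescaled by `scale` (typically 1/(1-prob)) so the expected activation is preserved.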
__global__ void _bcnn_dropout_layer_kernel(float *input, int size, float *rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id < size) {
input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
}
} |
ab894abedd0ba01661f5a341c1661cd4a36ac688.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <torch/library.h>
#include <ATen/hip/Atomic.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
template <typename T>
__global__ void roi_pool_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
T* output,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = round(offset_rois[1] * spatial_scale);
int roi_start_h = round(offset_rois[2] * spatial_scale);
int roi_end_w = round(offset_rois[3] * spatial_scale);
int roi_end_h = round(offset_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
if (offset_input[input_index] > maxval) {
maxval = offset_input[input_index];
maxidx = input_index;
}
}
}
output[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void roi_pool_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* argmax_data,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
T* grad_input,
const T* rois,
int n_stride,
int c_stride,
int h_stride,
int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
T* grad_input_offset =
grad_input + ((roi_batch_ind * channels + c) * height * width);
int output_offset = n * n_stride + c * c_stride;
const int* argmax_data_offset =
argmax_data + (n * channels + c) * pooled_height * pooled_width;
int argmax = argmax_data_offset[ph * pooled_width + pw];
if (argmax != -1) {
gpuAtomicAdd(
grad_input_offset + argmax,
static_cast<T>(
grad_output[output_offset + ph * h_stride + pw * w_stride]));
}
}
}
std::tuple<at::Tensor, at::Tensor> roi_pool_forward_kernel(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_pool_forward_kernel";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
at::Tensor argmax = at::zeros(
{num_rois, channels, pooled_height, pooled_width},
input.options().dtype(at::kInt));
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, argmax);
}
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "roi_pool_forward_kernel", [&] {
hipLaunchKernelGGL(( roi_pool_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
argmax.data_ptr<int>());
});
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, argmax);
}
at::Tensor roi_pool_backward_kernel(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& argmax,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3};
at::CheckedFrom c = "roi_pool_backward_kernel";
at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_pool_backward_kernel", [&] {
hipLaunchKernelGGL(( roi_pool_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.data_ptr<scalar_t>(),
argmax_.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::roi_pool"),
TORCH_FN(roi_pool_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"),
TORCH_FN(roi_pool_backward_kernel));
}
} // namespace ops
} // namespace vision
| ab894abedd0ba01661f5a341c1661cd4a36ac688.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <torch/library.h>
#include <ATen/cuda/Atomic.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
template <typename T>
__global__ void roi_pool_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
T* output,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = round(offset_rois[1] * spatial_scale);
int roi_start_h = round(offset_rois[2] * spatial_scale);
int roi_end_w = round(offset_rois[3] * spatial_scale);
int roi_end_h = round(offset_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
if (offset_input[input_index] > maxval) {
maxval = offset_input[input_index];
maxidx = input_index;
}
}
}
output[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void roi_pool_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* argmax_data,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
T* grad_input,
const T* rois,
int n_stride,
int c_stride,
int h_stride,
int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
T* grad_input_offset =
grad_input + ((roi_batch_ind * channels + c) * height * width);
int output_offset = n * n_stride + c * c_stride;
const int* argmax_data_offset =
argmax_data + (n * channels + c) * pooled_height * pooled_width;
int argmax = argmax_data_offset[ph * pooled_width + pw];
if (argmax != -1) {
gpuAtomicAdd(
grad_input_offset + argmax,
static_cast<T>(
grad_output[output_offset + ph * h_stride + pw * w_stride]));
}
}
}
std::tuple<at::Tensor, at::Tensor> roi_pool_forward_kernel(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_pool_forward_kernel";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
at::Tensor argmax = at::zeros(
{num_rois, channels, pooled_height, pooled_width},
input.options().dtype(at::kInt));
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "roi_pool_forward_kernel", [&] {
roi_pool_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
argmax.data_ptr<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
at::Tensor roi_pool_backward_kernel(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& argmax,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3};
at::CheckedFrom c = "roi_pool_backward_kernel";
at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_pool_backward_kernel", [&] {
roi_pool_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.data_ptr<scalar_t>(),
argmax_.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::roi_pool"),
TORCH_FN(roi_pool_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"),
TORCH_FN(roi_pool_backward_kernel));
}
} // namespace ops
} // namespace vision
|
326d5717dea48c1271e5b79cc29c21ebdcff2a9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transpo.cuh"
__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<0)||(i>=height)||(j<0)||(j>=width)) {}
else {
Resultat[j*height + i] = Source[i*width + j];
}
}
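// Tiled transpose through shared memory; the +1 padding column avoids bank conflicts.
// Note: __syncthreads() sits inside a divergent branch and the transposed indices are not bounds-checked,
// so the kernel implicitly assumes width and height are multiples of the block dimensions.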
__global__ void gpu_transpo_kernel_shared(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y+1];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y) + y;
int j = blockIdx.x*(BLOCKDIM_X) + x;
if ((i<0)||(i>=height)||(j<0)||(j>=width)) {}
else {
tuile[y][x] = Source[i*width + j];
__syncthreads();
int i = blockIdx.y*(BLOCKDIM_Y) + x;
int j = blockIdx.x*(BLOCKDIM_X) + y;
Resultat[j*height + i] = tuile[x][y];
}
}
void cpu_transpo(u_char **Source, u_char **Resultat, unsigned width, unsigned height){
#pragma omp parallel for num_threads(8)
for (unsigned i = 0; i < height; i++) {
for (unsigned j = 0; j < width; j++) {
Resultat[j][i] = Source[i][j];
}
}
}
| 326d5717dea48c1271e5b79cc29c21ebdcff2a9a.cu | #include "transpo.cuh"
__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<0)||(i>=height)||(j<0)||(j>=width)) {}
else {
Resultat[j*height + i] = Source[i*width + j];
}
}
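// Tiled transpose through shared memory; the +1 padding column avoids bank conflicts.
// Note: __syncthreads() sits inside a divergent branch and the transposed indices are not bounds-checked,
// so the kernel implicitly assumes width and height are multiples of the block dimensions.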
__global__ void gpu_transpo_kernel_shared(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y+1];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y) + y;
int j = blockIdx.x*(BLOCKDIM_X) + x;
if ((i<0)||(i>=height)||(j<0)||(j>=width)) {}
else {
tuile[y][x] = Source[i*width + j];
__syncthreads();
int i = blockIdx.y*(BLOCKDIM_Y) + x;
int j = blockIdx.x*(BLOCKDIM_X) + y;
Resultat[j*height + i] = tuile[x][y];
}
}
void cpu_transpo(u_char **Source, u_char **Resultat, unsigned width, unsigned height){
#pragma omp parallel for num_threads(8)
for (unsigned i = 0; i < height; i++) {
for (unsigned j = 0; j < width; j++) {
Resultat[j][i] = Source[i][j];
}
}
}
|
07a8a2fbbc04d508dc5061b4616eb372882d1c93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "magma_internal.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
magmaDoubleComplex *dA;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} zlaswp_sym_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void zlaswp_sym_kernel( zlaswp_sym_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < params.n ) {
for( int ii = params.j0; ii < params.npivots; ++ii ) {
int i1 = ii;
int i2 = params.ipiv[ii];
// swap: i1 <-> i2
// this thread is responsible for the tid-th element
magmaDoubleComplex *A1 = NULL, *A2 = NULL;
if (tid < i1) {
// row swap: (i1,tid) <-> (i2,tid)
A1 = params.dA + tid*params.lda + i1;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i1) {
// diagonal swap: (i1,i1) <-> (i2,i2)
A1 = params.dA + i1*params.lda + i1;
A2 = params.dA + i2*params.lda + i2;
} else if (tid < i2) {
// row-col swap: (tid,i1) <-> (i2,tid)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i2) {
// diagonal swap: done by i1-th thread
} else if (tid > i2) {
// column swap: (tid,i1) <-> (tid,i2)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + i2*params.lda + tid;
}
if ( A1 != NULL && A2 != NULL) {
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
}
}
}
}
// Launch zlaswp_sym kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void zlaswp_sym( zlaswp_sym_params_t ¶ms, magma_queue_t queue )
{
int blocks = magma_ceildiv(params.n, NTHREADS);
hipLaunchKernelGGL(( zlaswp_sym_kernel), dim3(blocks), dim3(NTHREADS), 0, queue->cuda_stream() , params );
}
/***************************************************************************//**
Purpose:
=============
ZLASWP_SYM applies a series of symmetric pivoting on a symmetric matrix A.
Currently, it is only implemented for the lower-triangular part of the matrix.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dA COMPLEX*16 array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
lda INTEGER
Stride between elements in same column.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp_sym
*******************************************************************************/
extern "C" void
magmablas_zlaswp_sym(
magma_int_t n, magmaDoubleComplex *dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n lda j0 npivots
zlaswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - 1;
}
zlaswp_sym( params, queue );
}
}
| 07a8a2fbbc04d508dc5061b4616eb372882d1c93.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "magma_internal.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
magmaDoubleComplex *dA;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} zlaswp_sym_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void zlaswp_sym_kernel( zlaswp_sym_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < params.n ) {
for( int ii = params.j0; ii < params.npivots; ++ii ) {
int i1 = ii;
int i2 = params.ipiv[ii];
// swap: i1 <-> i2
// this thread is responsible for the tid-th element
magmaDoubleComplex *A1 = NULL, *A2 = NULL;
if (tid < i1) {
// row swap: (i1,tid) <-> (i2,tid)
A1 = params.dA + tid*params.lda + i1;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i1) {
// diagonal swap: (i1,i1) <-> (i2,i2)
A1 = params.dA + i1*params.lda + i1;
A2 = params.dA + i2*params.lda + i2;
} else if (tid < i2) {
// row-col swap: (tid,i1) <-> (i2,tid)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i2) {
// diagonal swap: done by i1-th thread
} else if (tid > i2) {
// column swap: (tid,i1) <-> (tid,i2)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + i2*params.lda + tid;
}
if ( A1 != NULL && A2 != NULL) {
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
}
}
}
}
// Launch zlaswp_sym kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void zlaswp_sym( zlaswp_sym_params_t ¶ms, magma_queue_t queue )
{
int blocks = magma_ceildiv(params.n, NTHREADS);
zlaswp_sym_kernel<<< blocks, NTHREADS, 0, queue->cuda_stream() >>>( params );
}
/***************************************************************************//**
Purpose:
=============
ZLASWP_SYM applies a series of symmetric pivoting on a symmetric matrix A.
Currently, it is only implemented for the lower-triangular part of the matrix.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dA COMPLEX*16 array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
lda INTEGER
Stride between elements in same column.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp_sym
*******************************************************************************/
extern "C" void
magmablas_zlaswp_sym(
magma_int_t n, magmaDoubleComplex *dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n lda j0 npivots
zlaswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - 1;
}
zlaswp_sym( params, queue );
}
}
|
2530c75b466d9f6ce09d300c4fbe6a46920e0c79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
struct node{
int dst;
struct node* next;
};
struct list{
struct node *head;
};
struct graph{
int n;
struct list* set;
};
extern __managed__ struct node* newnode;
extern __managed__ struct graph* newgraph;
/*struct node* new_node(int dst){
hipMallocManaged(&newnode, sizeof(struct node), (unsigned int)hipMemAttachGlobal);
hipMemAdvise(newnode, sizeof(struct node), hipMemAdviseSetAccessedBy, hipCpuDeviceId);
newnode -> dst = dst;
newnode -> next = NULL;
return newnode;
}*/
void new_node(int dst){
hipMallocManaged(&newnode, sizeof(struct node), (unsigned int)hipMemAttachGlobal);
hipMemAdvise(newnode, sizeof(struct node), hipMemAdviseSetAccessedBy, hipCpuDeviceId);
newnode -> dst = dst;
newnode -> next = NULL;
}
/*struct graph* new_graph(int n){
hipMallocManaged(&newgraph, sizeof(struct graph), (unsigned int)hipMemAttachGlobal);
hipMemAdvise(newgraph, sizeof(struct graph), hipMemAdviseSetAccessedBy, hipCpuDeviceId);
newgraph -> n = n;
newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
int i;
for(i=0;i<n;i++)
newgraph->set[i].head = NULL;
return newgraph;
}*/
void new_graph(int n){
hipMallocManaged(&newgraph, sizeof(struct graph), (unsigned int)hipMemAttachGlobal);
hipMemAdvise(newgraph, sizeof(struct graph), hipMemAdviseSetAccessedBy, hipCpuDeviceId);
newgraph -> n = n;
newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
int i;
for(i=0;i<n;i++)
newgraph->set[i].head = NULL;
}
/*void addEdge(struct graph* gph, int src, int dst){
struct node* newnode = new_node(dst);
newnode->next = gph->set[src].head;
gph->set[src].head = newnode;
newnode = new_node(src);
newnode->next = gph->set[dst].head;
gph->set[dst].head = newnode;
}*/
void addEdge( int src, int dst){
new_node(dst);
newnode->next = newgraph->set[src].head;
newgraph->set[src].head = newnode;
new_node(src);
newnode->next = newgraph->set[dst].head;
newgraph->set[dst].head = newnode;
}
__global__ void count(int* auth_num) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
int co_auth = 0;
struct node* vert_node = newgraph->set[tid].head;
//printf("\n Adjacency list of vertex %d\n head ", v);
/*while (vert_node)
{
//printf("-> %d", vert_node->dst);
vert_node = vert_node->next;
co_auth++;
}*/
auth_num[tid] = vert_node->dst;
}
//Utility functions to read the file
long get_vert(char *str){
char vert[20];
int space_count = 0;
int num_vert=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 2){
vert[j] = str[i];
j++;
}
else if(space_count>2)
break;
i++;
}
vert[j] = '\0';
//printf("%s\n", vert);
num_vert = atoi(vert);
//printf("%d\n", num_vert);
return num_vert;
}
int get_src(char *str){
char s[20];
int space_count = 0;
int src=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 0){
s[j] = str[i];
j++;
}
else
break;
i++;
}
s[j] = '\0';
//printf("%s\n", s);
src = atoi(s);
//printf("%d\n", src);
return src;
}
int get_dst(char *str){
char d[20];
int space_count = 0;
int dst=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 1){
d[j] = str[i];
j++;
}
else if(space_count>1)
break;
i++;
}
d[j] = '\0';
//printf("%s\n", d);
dst = atoi(d);
//printf("%d\n", dst);
return dst;
}
int compare (const void * a, const void * b)
{
return ( *(int*)b - *(int*)a );
}
int main() {
FILE *fp;
char str[200];
const char* file = "dblp-co-authors.txt";
fp = fopen(file, "r");
if (fp == NULL){
printf("Could not open file %s",file);
return 1;
}
int vert;
fgets(str, 200, fp);
fgets(str, 200, fp);
fgets(str, 200, fp);
fgets(str, 200, fp);
fgets(str, 200, fp);
//printf("%s", str);
vert = get_vert(str);
long src, dst;
new_graph(vert);
//struct graph* gph = new_graph(vert);
while (fgets(str, 200, fp) != NULL){
//printf("%s", str);
src = get_src(str);
dst = get_dst(str);
addEdge(src,dst);
}
printf("Graph Created....\n");
/*for(int v=0;v<10;v++){
struct node* vert_node = newgraph->set[v].head;
checkauth=0;
printf("\n Adjacency list of vertex %d\n head ", v);
while (vert_node)
{
printf("-> %d", vert_node->dst);
vert_node = vert_node->next;
}
}*/
// Set GPU Variables based on input arguments
int graph_size = newgraph->n;
int block_size = 512;
int grid_size = ((graph_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t start, stop;
float time;
hipEventCreate (&start);
hipEventCreate (&stop);
// Input Arrays and variables
int *auth_num = new int [graph_size];
// Pointers in GPU memory
int *auth_num_gpu;
struct graph *gph_gpu;
int actual_size = 1049866 * sizeof(struct graph);
int num_size = graph_size * sizeof(int);
// allocate the memory on the GPU
//hipMalloc(&gph_gpu, actual_size);
//hipMalloc(&auth_num_gpu, num_size);
// copy the arrays 'a' and 'b' to the GPU
//hipMemcpy(gph_gpu,gph,actual_size,hipMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Counting....\n");
hipEventRecord(start,0);
// call the kernel
//count<<<grid_size,block_size>>>(auth_num_gpu);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
//printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
//hipMemcpy(auth_num,auth_num_gpu,num_size,hipMemcpyDeviceToHost);
/*for(int i=0;i<graph_size;i++)
printf("Authors: %d\n",auth_num[i]);*/
// free CPU data
hipFree (newgraph); // allocated with hipMallocManaged, so it must be released with hipFree
delete[] auth_num; // allocated with new[], so it must be released with delete[]
// free the memory allocated on the GPU
// HERE
//hipFree(auth_num_gpu);
return 0;
}
| 2530c75b466d9f6ce09d300c4fbe6a46920e0c79.cu | #include <stdio.h>
#include <stdlib.h>
struct node{
int dst;
struct node* next;
};
struct list{
struct node *head;
};
struct graph{
int n;
struct list* set;
};
extern __managed__ struct node* newnode;
extern __managed__ struct graph* newgraph;
/*struct node* new_node(int dst){
cudaMallocManaged(&newnode, sizeof(struct node), (unsigned int)cudaMemAttachGlobal);
cudaMemAdvise(newnode, sizeof(struct node), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
newnode -> dst = dst;
newnode -> next = NULL;
return newnode;
}*/
void new_node(int dst){
cudaMallocManaged(&newnode, sizeof(struct node), (unsigned int)cudaMemAttachGlobal);
cudaMemAdvise(newnode, sizeof(struct node), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
newnode -> dst = dst;
newnode -> next = NULL;
}
/*struct graph* new_graph(int n){
cudaMallocManaged(&newgraph, sizeof(struct graph), (unsigned int)cudaMemAttachGlobal);
cudaMemAdvise(newgraph, sizeof(struct graph), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
newgraph -> n = n;
newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
int i;
for(i=0;i<n;i++)
newgraph->set[i].head = NULL;
return newgraph;
}*/
void new_graph(int n){
cudaMallocManaged(&newgraph, sizeof(struct graph), (unsigned int)cudaMemAttachGlobal);
cudaMemAdvise(newgraph, sizeof(struct graph), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
newgraph -> n = n;
newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
int i;
for(i=0;i<n;i++)
newgraph->set[i].head = NULL;
}
/*void addEdge(struct graph* gph, int src, int dst){
struct node* newnode = new_node(dst);
newnode->next = gph->set[src].head;
gph->set[src].head = newnode;
newnode = new_node(src);
newnode->next = gph->set[dst].head;
gph->set[dst].head = newnode;
}*/
void addEdge( int src, int dst){
new_node(dst);
newnode->next = newgraph->set[src].head;
newgraph->set[src].head = newnode;
new_node(src);
newnode->next = newgraph->set[dst].head;
newgraph->set[dst].head = newnode;
}
__global__ void count(int* auth_num) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
int co_auth = 0;
struct node* vert_node = newgraph->set[tid].head;
//printf("\n Adjacency list of vertex %d\n head ", v);
/*while (vert_node)
{
//printf("-> %d", vert_node->dst);
vert_node = vert_node->next;
co_auth++;
}*/
auth_num[tid] = vert_node->dst;
}
//Utility functions to read the file
long get_vert(char *str){
char vert[20];
int space_count = 0;
int num_vert=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 2){
vert[j] = str[i];
j++;
}
else if(space_count>2)
break;
i++;
}
vert[j] = '\0';
//printf("%s\n", vert);
num_vert = atoi(vert);
//printf("%d\n", num_vert);
return num_vert;
}
int get_src(char *str){
char s[20];
int space_count = 0;
int src=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 0){
s[j] = str[i];
j++;
}
else
break;
i++;
}
s[j] = '\0';
//printf("%s\n", s);
src = atoi(s);
//printf("%d\n", src);
return src;
}
int get_dst(char *str){
char d[20];
int space_count = 0;
int dst=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 1){
d[j] = str[i];
j++;
}
else if(space_count>1)
break;
i++;
}
d[j] = '\0';
//printf("%s\n", d);
dst = atoi(d);
//printf("%d\n", dst);
return dst;
}
int compare (const void * a, const void * b)
{
return ( *(int*)b - *(int*)a );
}
int main() {
FILE *fp;
char str[200];
const char* file = "dblp-co-authors.txt";
fp = fopen(file, "r");
if (fp == NULL){
printf("Could not open file %s",file);
return 1;
}
int vert;
fgets(str, 200, fp);
fgets(str, 200, fp);
fgets(str, 200, fp);
fgets(str, 200, fp);
fgets(str, 200, fp);
//printf("%s", str);
vert = get_vert(str);
long src, dst;
new_graph(vert);
//struct graph* gph = new_graph(vert);
while (fgets(str, 200, fp) != NULL){
//printf("%s", str);
src = get_src(str);
dst = get_dst(str);
addEdge(src,dst);
}
printf("Graph Created....\n");
/*for(int v=0;v<10;v++){
struct node* vert_node = newgraph->set[v].head;
checkauth=0;
printf("\n Adjacency list of vertex %d\n head ", v);
while (vert_node)
{
printf("-> %d", vert_node->dst);
vert_node = vert_node->next;
}
}*/
// Set GPU Variables based on input arguments
int graph_size = newgraph->n;
int block_size = 512;
int grid_size = ((graph_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// Input Arrays and variables
int *auth_num = new int [graph_size];
// Pointers in GPU memory
int *auth_num_gpu;
struct graph *gph_gpu;
int actual_size = 1049866 * sizeof(struct graph);
int num_size = graph_size * sizeof(int);
// allocate the memory on the GPU
//cudaMalloc(&gph_gpu, actual_size);
//cudaMalloc(&auth_num_gpu, num_size);
// copy the arrays 'a' and 'b' to the GPU
//cudaMemcpy(gph_gpu,gph,actual_size,cudaMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Counting....\n");
cudaEventRecord(start,0);
// call the kernel
//count<<<grid_size,block_size>>>(auth_num_gpu);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
//cudaMemcpy(auth_num,auth_num_gpu,num_size,cudaMemcpyDeviceToHost);
/*for(int i=0;i<graph_size;i++)
printf("Authors: %d\n",auth_num[i]);*/
// free CPU data
cudaFree (newgraph); // allocated with cudaMallocManaged, so it must be released with cudaFree
delete[] auth_num; // allocated with new[], so it must be released with delete[]
// free the memory allocated on the GPU
// HERE
//cudaFree(auth_num_gpu);
return 0;
}
|
b810974fa197706c1b6e19af9763879b1dc038be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BrokenLineFitOnGPU.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv,
uint32_t hitsInFit,
uint32_t maxNumberOfTuples,
hipStream_t stream) {
assert(tuples_d);
auto blockSize = 64;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
// Fit internals
auto hitsGPU_ = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream);
auto hits_geGPU_ = cms::cuda::make_device_unique<float[]>(
maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream);
auto fast_fit_resultsGPU_ = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream);
for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
// fit triplets
hipLaunchKernelGGL(( kernelBLFastFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream,
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernelBLFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
3,
offset);
cudaCheck(hipGetLastError());
// fit quads
hipLaunchKernelGGL(( kernelBLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernelBLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
4,
offset);
cudaCheck(hipGetLastError());
if (fit5as4_) {
// fit penta (only first 4)
hipLaunchKernelGGL(( kernelBLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernelBLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
5,
offset);
cudaCheck(hipGetLastError());
} else {
// fit penta (all 5)
hipLaunchKernelGGL(( kernelBLFastFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernelBLFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
5,
offset);
cudaCheck(hipGetLastError());
}
} // loop on concurrent fits
}
| b810974fa197706c1b6e19af9763879b1dc038be.cu | #include "BrokenLineFitOnGPU.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv,
uint32_t hitsInFit,
uint32_t maxNumberOfTuples,
cudaStream_t stream) {
assert(tuples_d);
auto blockSize = 64;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
// Fit internals
auto hitsGPU_ = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream);
auto hits_geGPU_ = cms::cuda::make_device_unique<float[]>(
maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream);
auto fast_fit_resultsGPU_ = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream);
for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
// fit triplets
kernelBLFastFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset);
cudaCheck(cudaGetLastError());
kernelBLFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
3,
offset);
cudaCheck(cudaGetLastError());
// fit quads
kernelBLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset);
cudaCheck(cudaGetLastError());
kernelBLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
4,
offset);
cudaCheck(cudaGetLastError());
if (fit5as4_) {
// fit penta (only first 4)
kernelBLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
cudaCheck(cudaGetLastError());
kernelBLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
5,
offset);
cudaCheck(cudaGetLastError());
} else {
// fit penta (all 5)
kernelBLFastFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
cudaCheck(cudaGetLastError());
kernelBLFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d,
bField_,
outputSoa_d,
hitsGPU_.get(),
hits_geGPU_.get(),
fast_fit_resultsGPU_.get(),
5,
offset);
cudaCheck(cudaGetLastError());
}
} // loop on concurrent fits
}
|
42a215d2d972ba1f565402f169ccb5806c466052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<assert.h>
#include<algorithm>
#define N 10000
#define K 30
#define RadixBit 2
#define RadixSize 4//2 ^ RadixBit
#define RadixMask 3
using namespace std;
__device__ int get_Laneid() {
int laneId;
asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
return laneId;
}
__device__ unsigned int getBitwise(unsigned int val,unsigned int pos,unsigned int bits) {
unsigned int m = (1 << bits) - 1u;
return (val >> pos) & m;
}
__device__ unsigned int setBitwise(unsigned int val, unsigned int insertval,unsigned int pos, unsigned int bits) {
unsigned int m = (1u << bits) - 1u; // mask of `bits` low bits, matching getBitwise above
insertval <<= pos;
m <<= pos;
return (val & ~m) | insertval;
}
__device__ void countingMaskedval(int *count,
unsigned int *data,
unsigned int desired,
unsigned int mask,
int pos,
unsigned int *smem,
unsigned int n) {
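// Count, per digit value, the candidates at bit position `pos`: only elements with
// (data[i] & mask) == desired are still in play. __ballot_sync collects one vote bit
// per warp lane and __popc counts them, so each warp tallies 32 elements per pass;
// lane 0 folds its warp's counts into shared memory with atomicAdd, and finally every
// thread reads the block-wide totals back into its local count[].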
for(int i = 0;i < RadixSize;i++) {
count[i] = 0;
}
//initializer
if(threadIdx.x < RadixSize) {
smem[threadIdx.x] = 0;
}
__syncthreads();
for(unsigned int i = threadIdx.x;i < n;i += blockDim.x) {
unsigned int valbit = getBitwise(data[i], pos, RadixBit);
bool hasval = ((data[i] & mask) == desired);
for(int j = 0;j < RadixSize;j++) {
bool vote = hasval && (valbit == j);
count[j] += __popc(__ballot_sync(0xffffffff,vote));
}
}
if(get_Laneid() == 0) {
for(int i = 0;i < RadixSize;i++) {
atomicAdd(smem + i, count[i]);
}
}
__syncthreads();
for(int i = 0;i < RadixSize;i++) {
count[i] = smem[i];
}
__syncthreads();
}
unsigned int __device__ findPattern(unsigned int *data,unsigned int *smem, unsigned int desired, unsigned int mask, unsigned int n) {
if (threadIdx.x < 2) {
smem[threadIdx.x] = 0;
}
__syncthreads();
for(int i = threadIdx.x;i < n;i += blockDim.x) {
if((data[i] & mask) == desired) {
smem[0] = 1;
smem[1] = data[i];
}
}
__syncthreads();
unsigned int found = smem[0];
unsigned int val = smem[1];
__syncthreads();
if(found == 1) {
//one thread found the unique value
//and every thread returns it
//printf("%u ",val);
return val;
}
assert(false);
//do not find the data
printf("%u ",val);
return 0;
}
void __device__ RadixSelect(unsigned int *data,unsigned int n,unsigned int *smem, int k, unsigned int *topk) {
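// Radix select for the k-th largest value: scan digits of RadixBit bits from the most
// significant position down. At each position, count the candidates per digit bucket,
// walk the buckets from largest digit to smallest, skip buckets smaller than ktofind
// (subtracting their counts), and pin the digit of the bucket containing the k-th value
// into `desired`/`mask` before moving to the next lower position. A bucket of count 1
// when ktofind == 1 holds a unique value, retrieved via findPattern; if the scan
// exhausts all positions, the tied elements all equal `desired`, which is returned.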
//every thread has mask,desired,count to deal N/blockDim.x datas
int count[RadixSize];
unsigned int desired = 0;
unsigned int mask = 0;
int ktofind = k;
unsigned int ret;
for(int pos = sizeof(unsigned int)*8 - RadixBit;pos >=0;pos -= RadixBit) {
countingMaskedval(count, data, desired, mask, pos, smem, n);
auto find_unique = [&](int i, int counts) {
if(counts == 1 && ktofind == 1) {
desired = setBitwise(desired, i, pos, RadixBit);
mask = setBitwise(mask, RadixMask, pos, RadixBit);
//in every thread's head
//now we know somedata & mask = desired is unique,and we will find this data;
*topk = findPattern(data, smem, desired, mask, n);
return true;
}
return false;
};
auto find_non_unique = [&](int i, int counts) {
if(counts >= ktofind) {
desired = setBitwise(desired, i, pos, RadixBit);
mask = setBitwise(mask, RadixMask, pos, RadixBit);
//continue find and the topk is in which & mask = desired
return true;
}
//continue find
ktofind -= counts;
return false;
};
for(int i = RadixSize - 1;i >= 0;i--) {
int c = count[i];
if(find_unique(i, c)) {
return;
}
if(find_non_unique(i, c)) {
//continue
break;
}
}
}
//several elements may tie at the k-th value; returning that shared value is fine
*topk = desired;
}
void __global__ findtopK(unsigned int* data, unsigned int n, unsigned int *topk) {
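// One block per rank: block b runs an independent radix select for the (b+1)-th
// largest element and writes it to topk[b].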
__shared__ unsigned int smem[64];
RadixSelect(data, n, smem, blockIdx.x + 1, topk + blockIdx.x);
}
int main() {
unsigned int data[N],*data_dev;
unsigned int topk[K],*topk_dev;
for(int i = 0;i < N;i++) {
data[i] = random()%1000;
}
hipMalloc((void**)&data_dev, sizeof(data));
hipMalloc((void**)&topk_dev, sizeof(topk));
hipMemcpy(data_dev, data ,sizeof(data), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( findtopK), dim3(K),dim3(1024), 0, 0, data_dev, N, topk_dev);
hipMemcpy(topk, topk_dev ,sizeof(topk), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
sort(data, data + N, [](unsigned int a,unsigned int b) {
return a > b;
});
for(int i = 0;i < K;i++) {
if(data[i] != topk[i]) {
cout << "faild !!!" << "index: " << i << "cpu:" << data[i] << " gpu: " << topk[i] <<endl;
}
}
return 0;
}
| 42a215d2d972ba1f565402f169ccb5806c466052.cu | #include<iostream>
#include<assert.h>
#include<algorithm>
#define N 10000
#define K 30
#define RadixBit 2
#define RadixSize 4//2 ^ RadixBit
#define RadixMask 3
using namespace std;
__device__ int get_Laneid() {
int laneId;
asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
return laneId;
}
__device__ unsigned int getBitwise(unsigned int val,unsigned int pos,unsigned int bits) {
unsigned int m = (1 << bits) - 1u;
return (val >> pos) & m;
}
__device__ unsigned int setBitwise(unsigned int val, unsigned int insertval,unsigned int pos, unsigned int bits) {
unsigned int m = (1u << bits) - 1u; // mask of `bits` low bits, matching getBitwise above
insertval <<= pos;
m <<= pos;
return (val & ~m) | insertval;
}
__device__ void countingMaskedval(int *count,
unsigned int *data,
unsigned int desired,
unsigned int mask,
int pos,
unsigned int *smem,
unsigned int n) {
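// Count, per digit value, the candidates at bit position `pos`: only elements with
// (data[i] & mask) == desired are still in play. __ballot_sync collects one vote bit
// per warp lane and __popc counts them, so each warp tallies 32 elements per pass;
// lane 0 folds its warp's counts into shared memory with atomicAdd, and finally every
// thread reads the block-wide totals back into its local count[].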
for(int i = 0;i < RadixSize;i++) {
count[i] = 0;
}
//initializer
if(threadIdx.x < RadixSize) {
smem[threadIdx.x] = 0;
}
__syncthreads();
for(unsigned int i = threadIdx.x;i < n;i += blockDim.x) {
unsigned int valbit = getBitwise(data[i], pos, RadixBit);
bool hasval = ((data[i] & mask) == desired);
for(int j = 0;j < RadixSize;j++) {
bool vote = hasval && (valbit == j);
count[j] += __popc(__ballot_sync(0xffffffff,vote));
}
}
if(get_Laneid() == 0) {
for(int i = 0;i < RadixSize;i++) {
atomicAdd(smem + i, count[i]);
}
}
__syncthreads();
for(int i = 0;i < RadixSize;i++) {
count[i] = smem[i];
}
__syncthreads();
}
unsigned int __device__ findPattern(unsigned int *data,unsigned int *smem, unsigned int desired, unsigned int mask, unsigned int n) {
if (threadIdx.x < 2) {
smem[threadIdx.x] = 0;
}
__syncthreads();
for(int i = threadIdx.x;i < n;i += blockDim.x) {
if((data[i] & mask) == desired) {
smem[0] = 1;
smem[1] = data[i];
}
}
__syncthreads();
unsigned int found = smem[0];
unsigned int val = smem[1];
__syncthreads();
if(found == 1) {
//one thread found the unique value
//and every thread returns it
//printf("%u ",val);
return val;
}
assert(false);
//do not find the data
printf("%u ",val);
return 0;
}
void __device__ RadixSelect(unsigned int *data,unsigned int n,unsigned int *smem, int k, unsigned int *topk) {
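// Radix select for the k-th largest value: scan digits of RadixBit bits from the most
// significant position down. At each position, count the candidates per digit bucket,
// walk the buckets from largest digit to smallest, skip buckets smaller than ktofind
// (subtracting their counts), and pin the digit of the bucket containing the k-th value
// into `desired`/`mask` before moving to the next lower position. A bucket of count 1
// when ktofind == 1 holds a unique value, retrieved via findPattern; if the scan
// exhausts all positions, the tied elements all equal `desired`, which is returned.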
//every thread has mask,desired,count to deal N/blockDim.x datas
int count[RadixSize];
unsigned int desired = 0;
unsigned int mask = 0;
int ktofind = k;
unsigned int ret;
for(int pos = sizeof(unsigned int)*8 - RadixBit;pos >=0;pos -= RadixBit) {
countingMaskedval(count, data, desired, mask, pos, smem, n);
auto find_unique = [&](int i, int counts) {
if(counts == 1 && ktofind == 1) {
desired = setBitwise(desired, i, pos, RadixBit);
mask = setBitwise(mask, RadixMask, pos, RadixBit);
//in every thread's head
//now we know somedata & mask = desired is unique,and we will find this data;
*topk = findPattern(data, smem, desired, mask, n);
return true;
}
return false;
};
auto find_non_unique = [&](int i, int counts) {
if(counts >= ktofind) {
desired = setBitwise(desired, i, pos, RadixBit);
mask = setBitwise(mask, RadixMask, pos, RadixBit);
//continue find and the topk is in which & mask = desired
return true;
}
//continue find
ktofind -= counts;
return false;
};
for(int i = RadixSize - 1;i >= 0;i--) {
int c = count[i];
if(find_unique(i, c)) {
return;
}
if(find_non_unique(i, c)) {
//continue
break;
}
}
}
//several elements may tie at the k-th value; returning that shared value is fine
*topk = desired;
}
void __global__ findtopK(unsigned int* data, unsigned int n, unsigned int *topk) {
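// One block per rank: block b runs an independent radix select for the (b+1)-th
// largest element and writes it to topk[b].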
__shared__ unsigned int smem[64];
RadixSelect(data, n, smem, blockIdx.x + 1, topk + blockIdx.x);
}
int main() {
unsigned int data[N],*data_dev;
unsigned int topk[K],*topk_dev;
for(int i = 0;i < N;i++) {
data[i] = random()%1000;
}
cudaMalloc((void**)&data_dev, sizeof(data));
cudaMalloc((void**)&topk_dev, sizeof(topk));
cudaMemcpy(data_dev, data ,sizeof(data), cudaMemcpyHostToDevice);
findtopK<<<K,1024>>>(data_dev, N, topk_dev);
cudaMemcpy(topk, topk_dev ,sizeof(topk), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
sort(data, data + N, [](unsigned int a,unsigned int b) {
return a > b;
});
for(int i = 0;i < K;i++) {
if(data[i] != topk[i]) {
cout << "faild !!!" << "index: " << i << "cpu:" << data[i] << " gpu: " << topk[i] <<endl;
}
}
return 0;
}
|
8ac657f52b193b0a69a7d08334187959faea6b48.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CURAND API: first use of the PseudoRandom Number Generators
* limited to 65535 kernels, one-dimensional/two-dimensional launch
* watch out for the value of N... I cannot process a lot of data while also driving the monitor at the same time
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define PI 3.14159265358979323846
#define N 128
#define N2 N
__global__ void setup_kernel ( hiprandStateXORWOW_t * state){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x+ y*blockDim.x*gridDim.x;
while (offset<N){
/* Each thread gets same seed , a different sequence number no offset */
hiprand_init (1234 , offset , 0 , &state[offset]);
offset += blockDim.x*gridDim.x;
__syncthreads();
}
}
__global__ void generate_bit_kernel ( hiprandStateXORWOW_t * state , float * result ){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
while (offset < N){
hiprandStateXORWOW_t localState = state[offset];
float awgn = hiprand_normal(&localState);
result[offset] = awgn;
//state[offset]=localState;
offset += blockDim.x*gridDim.x;
__syncthreads();
}
}
int main ( int argc , char * argv []){
int i;
//dim3 dimGrid(1024); //number of blocks
//dim3 dimBlock(512); //number of threads per block
dim3 dimGrid(8,8);
dim3 dimBlock(16,16); // 2 dim max 512
hiprandStateXORWOW_t * devStates ;
float *hostResults, *devResults;
/* Allocate space for results on host */
hostResults = ( float *) calloc (N2 , sizeof(float) );
/* Allocate space for results on device */
hipMalloc (( void **) &devResults , N2 * sizeof(float) );
/* Fill results with a placeholder byte pattern (every byte set to 2); the kernel overwrites it */
hipMemset ( devResults , 2, N2 * sizeof(float) );
/* Allocate space for prng states on device */
hipMalloc (( void **) &devStates , N2 * sizeof(hiprandStateXORWOW_t) );
/* Setup prng states */
hipLaunchKernelGGL(( setup_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, devStates ) ;
hipDeviceSynchronize();
/* Generate and use pseudo - random */
//for ( i = 0; i < 10; i++) {
hipLaunchKernelGGL(( generate_bit_kernel) , dim3(dimGrid),dim3(dimBlock), 0, 0, devStates , devResults ) ;
hipDeviceSynchronize();
//}
/* Copy device memory to host */
hipMemcpy ( hostResults , devResults , N2 * sizeof(float) , hipMemcpyDeviceToHost ) ;
/* Show result */
float tmp = 0.0f;
for ( i=0; i < N; i++ ) {
tmp+=hostResults[i];
}
printf("%f \n",tmp);
/* Cleanup */
hipFree(devStates);
hipFree(devResults);
free(hostResults);
system("pause");
return EXIT_SUCCESS;
} | 8ac657f52b193b0a69a7d08334187959faea6b48.cu | /*
* CURAND API: first use of the PseudoRandom Number Generators
* limited to 65535 kernels, one-dimensional/two-dimensional launch
* watch out for the value of N... I cannot process a lot of data while also driving the monitor at the same time
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand_kernel.h>
#define PI 3.14159265358979323846
#define N 128
#define N2 N
__global__ void setup_kernel ( curandStateXORWOW_t * state){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x+ y*blockDim.x*gridDim.x;
while (offset<N){
/* Each thread gets same seed , a different sequence number no offset */
curand_init (1234 , offset , 0 , &state[offset]);
offset += blockDim.x*gridDim.x;
__syncthreads();
}
}
__global__ void generate_bit_kernel ( curandStateXORWOW_t * state , float * result ){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
while (offset < N){
curandStateXORWOW_t localState = state[offset];
float awgn = curand_normal(&localState);
result[offset] = awgn;
//state[offset]=localState;
offset += blockDim.x*gridDim.x;
__syncthreads();
}
}
int main ( int argc , char * argv []){
int i;
//dim3 dimGrid(1024); //number of blocks
//dim3 dimBlock(512); //number of threads per block
dim3 dimGrid(8,8);
dim3 dimBlock(16,16); // 2 dim max 512
curandStateXORWOW_t * devStates ;
float *hostResults, *devResults;
/* Allocate space for results on host */
hostResults = ( float *) calloc (N2 , sizeof(float) );
/* Allocate space for results on device */
cudaMalloc (( void **) &devResults , N2 * sizeof(float) );
/* Fill results with a placeholder byte pattern (every byte set to 2); the kernel overwrites it */
cudaMemset ( devResults , 2, N2 * sizeof(float) );
/* Allocate space for prng states on device */
cudaMalloc (( void **) &devStates , N2 * sizeof(curandStateXORWOW_t) );
/* Setup prng states */
setup_kernel <<<dimGrid, dimBlock>>>( devStates ) ;
cudaThreadSynchronize();
/* Generate and use pseudo - random */
//for ( i = 0; i < 10; i++) {
generate_bit_kernel <<<dimGrid,dimBlock>>>( devStates , devResults ) ;
cudaThreadSynchronize();
//}
/* Copy device memory to host */
cudaMemcpy ( hostResults , devResults , N2 * sizeof(float) , cudaMemcpyDeviceToHost ) ;
/* Show result */
float tmp = 0.0f;
for ( i=0; i < N; i++ ) {
tmp+=hostResults[i];
}
printf("%f \n",tmp);
/* Cleanup */
cudaFree(devStates);
cudaFree(devResults);
free(hostResults);
system("pause");
return EXIT_SUCCESS;
} |
ec16c45f555317106c54217fdc168e6d487b3660.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Added by Karel Adamek
//#define THRESHOLD_DEBUG
#include <helper_cuda.h>
#include "headers/device_BC_plan.h"
#include "headers/params.h"
#include "device_threshold_kernel.cu"
void THR_init(void) {
//---------> Specific nVidia stuff
hipDeviceSetCacheConfig (hipFuncCachePreferShared);
hipDeviceSetSharedMemConfig (hipSharedMemBankSizeFourByte);
}
int THRESHOLD(float *d_input, ushort *d_input_taps, float *d_output_list, int *gmem_pos, float threshold, int nDMs, int nTimesamples, int shift, std::vector<PulseDetection_plan> *PD_plan, int max_iteration, int max_list_size) {
//---------> Task specific
int nBlocks, nRest, Elements_per_block, output_offset, decimated_timesamples, local_offset;
int nCUDAblocks_x, nCUDAblocks_y;
dim3 gridSize(1, 1, 1);
dim3 blockSize(WARP*THR_WARPS_PER_BLOCK, 1, 1);
THR_init();
output_offset=0;
for(int f=0; f<max_iteration; f++){
decimated_timesamples = PD_plan->operator[](f).decimated_timesamples;
//local_offset = (offset>>f);
local_offset = PD_plan->operator[](f).unprocessed_samples;
if( (decimated_timesamples-local_offset)>0 ){
Elements_per_block = WARP*THR_ELEM_PER_THREAD;
nBlocks = (decimated_timesamples-local_offset)/Elements_per_block;
nRest = (decimated_timesamples-local_offset) - nBlocks*Elements_per_block;
if(nRest>0) nBlocks++;
nCUDAblocks_x = nBlocks;
nCUDAblocks_y = nDMs/THR_WARPS_PER_BLOCK;
gridSize.x=nCUDAblocks_x; gridSize.y=nCUDAblocks_y; gridSize.z=1;
blockSize.x=WARP*THR_WARPS_PER_BLOCK; blockSize.y=1; blockSize.z=1;
output_offset = nDMs*PD_plan->operator[](f).output_shift;
hipLaunchKernelGGL(( THR_GPU_WARP), dim3(gridSize), dim3(blockSize), 0, 0, &d_input[output_offset], &d_input_taps[output_offset], d_output_list, gmem_pos, threshold, decimated_timesamples, decimated_timesamples-local_offset, shift, max_list_size, (1<<f));
checkCudaErrors(hipGetLastError());
}
}
return (0);
}
int Threshold_for_periodicity(float *d_input, ushort *d_input_harms, float *d_output_list, int *gmem_pos, float *d_MSD, float threshold, int primary_size, int secondary_size, int DM_shift, int inBin, int max_list_size) {
//---------> Task specific
int nBlocks_p, nBlocks_s;
dim3 gridSize(1, 1, 1);
dim3 blockSize(WARP, WARP/2, 1);
nBlocks_p = (int) (primary_size/(blockSize.x*THR_ELEM_PER_THREAD));
if( (primary_size%(blockSize.x*THR_ELEM_PER_THREAD))!=0 ) nBlocks_p++;
nBlocks_s = (int) (secondary_size/blockSize.y);
if( (secondary_size%blockSize.y)!=0 ) nBlocks_s++;
gridSize.x = nBlocks_p;
gridSize.y = nBlocks_s;
gridSize.z = 1;
#ifdef THRESHOLD_DEBUG
printf("Primary:%d; Secondary:%d\n", primary_size, secondary_size);
printf("gridSize: [%d; %d; %d]\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize: [%d; %d; %d]\n", blockSize.x, blockSize.y, blockSize.z);
#endif
THR_init();
hipLaunchKernelGGL(( GPU_Threshold_for_periodicity_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_input_harms, d_output_list, gmem_pos, d_MSD, threshold, primary_size, secondary_size, DM_shift, max_list_size, inBin);
checkCudaErrors(hipGetLastError());
return (0);
}
| ec16c45f555317106c54217fdc168e6d487b3660.cu | //Added by Karel Adamek
//#define THRESHOLD_DEBUG
#include <helper_cuda.h>
#include "headers/device_BC_plan.h"
#include "headers/params.h"
#include "device_threshold_kernel.cu"
void THR_init(void) {
//---------> Specific nVidia stuff
cudaDeviceSetCacheConfig (cudaFuncCachePreferShared);
cudaDeviceSetSharedMemConfig (cudaSharedMemBankSizeFourByte);
}
int THRESHOLD(float *d_input, ushort *d_input_taps, float *d_output_list, int *gmem_pos, float threshold, int nDMs, int nTimesamples, int shift, std::vector<PulseDetection_plan> *PD_plan, int max_iteration, int max_list_size) {
//---------> Task specific
int nBlocks, nRest, Elements_per_block, output_offset, decimated_timesamples, local_offset;
int nCUDAblocks_x, nCUDAblocks_y;
dim3 gridSize(1, 1, 1);
dim3 blockSize(WARP*THR_WARPS_PER_BLOCK, 1, 1);
THR_init();
output_offset=0;
for(int f=0; f<max_iteration; f++){
decimated_timesamples = PD_plan->operator[](f).decimated_timesamples;
//local_offset = (offset>>f);
local_offset = PD_plan->operator[](f).unprocessed_samples;
if( (decimated_timesamples-local_offset)>0 ){
Elements_per_block = WARP*THR_ELEM_PER_THREAD;
nBlocks = (decimated_timesamples-local_offset)/Elements_per_block;
nRest = (decimated_timesamples-local_offset) - nBlocks*Elements_per_block;
if(nRest>0) nBlocks++;
nCUDAblocks_x = nBlocks;
nCUDAblocks_y = nDMs/THR_WARPS_PER_BLOCK;
gridSize.x=nCUDAblocks_x; gridSize.y=nCUDAblocks_y; gridSize.z=1;
blockSize.x=WARP*THR_WARPS_PER_BLOCK; blockSize.y=1; blockSize.z=1;
output_offset = nDMs*PD_plan->operator[](f).output_shift;
THR_GPU_WARP<<<gridSize, blockSize>>>(&d_input[output_offset], &d_input_taps[output_offset], d_output_list, gmem_pos, threshold, decimated_timesamples, decimated_timesamples-local_offset, shift, max_list_size, (1<<f));
checkCudaErrors(cudaGetLastError());
}
}
return (0);
}
int Threshold_for_periodicity(float *d_input, ushort *d_input_harms, float *d_output_list, int *gmem_pos, float *d_MSD, float threshold, int primary_size, int secondary_size, int DM_shift, int inBin, int max_list_size) {
//---------> Task specific
int nBlocks_p, nBlocks_s;
dim3 gridSize(1, 1, 1);
dim3 blockSize(WARP, WARP/2, 1);
nBlocks_p = (int) (primary_size/(blockSize.x*THR_ELEM_PER_THREAD));
if( (primary_size%(blockSize.x*THR_ELEM_PER_THREAD))!=0 ) nBlocks_p++;
nBlocks_s = (int) (secondary_size/blockSize.y);
if( (secondary_size%blockSize.y)!=0 ) nBlocks_s++;
gridSize.x = nBlocks_p;
gridSize.y = nBlocks_s;
gridSize.z = 1;
#ifdef THRESHOLD_DEBUG
printf("Primary:%d; Secondary:%d\n", primary_size, secondary_size);
printf("gridSize: [%d; %d; %d]\n", gridSize.x, gridSize.y, gridSize.z);
printf("blockSize: [%d; %d; %d]\n", blockSize.x, blockSize.y, blockSize.z);
#endif
THR_init();
GPU_Threshold_for_periodicity_kernel<<<gridSize, blockSize>>>(d_input, d_input_harms, d_output_list, gmem_pos, d_MSD, threshold, primary_size, secondary_size, DM_shift, max_list_size, inBin);
checkCudaErrors(cudaGetLastError());
return (0);
}
|
b24a0a5e11738b149b2fcc63424695febebd0aec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void inefficient_prefixSum(float* in, int in_length, float* out ){
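// Work-inefficient (Hillis-Steele style) inclusive scan within a single block: each
// thread folds in the element 1, 2, 4, ... positions behind it, costing O(n log n)
// additions, and no carry is propagated across blocks. The launch must pass at least
// blockDim.x * sizeof(float) bytes of dynamic shared memory. Note that __syncthreads()
// sits inside a loop whose trip count depends on threadIdx.x, so the synchronization is
// divergent, a known caveat of this naive formulation (formally undefined behavior).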
//shared memory declaration
extern __shared__ float DSM[];
//compute index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < in_length){
//load on shared memory
DSM[threadIdx.x] = in[idx];
//compute prefix_sum making sequence of sums
for(int stride = 1; stride <= threadIdx.x; stride *= 2){
__syncthreads();
DSM[threadIdx.x] = DSM[threadIdx.x] + DSM[threadIdx.x - stride];
}
out[idx] = DSM[threadIdx.x];
}
} | b24a0a5e11738b149b2fcc63424695febebd0aec.cu | #include "includes.h"
__global__ void inefficient_prefixSum(float* in, int in_length, float* out ){
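// Work-inefficient (Hillis-Steele style) inclusive scan within a single block: each
// thread folds in the element 1, 2, 4, ... positions behind it, costing O(n log n)
// additions, and no carry is propagated across blocks. The launch must pass at least
// blockDim.x * sizeof(float) bytes of dynamic shared memory. Note that __syncthreads()
// sits inside a loop whose trip count depends on threadIdx.x, so the synchronization is
// divergent, a known caveat of this naive formulation (formally undefined behavior).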
//shared memory declaration
extern __shared__ float DSM[];
//compute index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < in_length){
//load on shared memory
DSM[threadIdx.x] = in[idx];
//compute prefix_sum making sequence of sums
for(int stride = 1; stride <= threadIdx.x; stride *= 2){
__syncthreads();
DSM[threadIdx.x] = DSM[threadIdx.x] + DSM[threadIdx.x - stride];
}
out[idx] = DSM[threadIdx.x];
}
} |
7ac99ebaa02b2fbde8fd89de21810d812ef7c2b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Bitshuffle cuda kernels aimed at hdf5 bslz4 format from the Dectris Eiger detectors.
Written by Jon Wright, ESRF, June 2021.
*/
#include <stdint.h>
__global__ void copybytes(uint8_t * src, uint32_t srcstart, uint8_t * dst, uint32_t dststart )
{
dst[threadIdx.x + dststart] = src[threadIdx.x+srcstart];
}
__global__ void shuf_8192_32(const uint32_t * __restrict__ in,
uint32_t * __restrict__ out ){
/*
grid 32x32 threads
each thread loads 4 bytes (aligned) = 128 bytes per row of 32
total bytes loaded = 32x32x4 = 4096 bytes
x y z
blocks = ( total_bytes / 8192, 2, 1 )
*/
__shared__ uint32_t smem[32][33];
uint32_t v;
/* This thread is going to load 4 bytes. Next thread in x will load
the next 4 to be aligned. In total we pick up 32*4 = 128 bytes in this
row of 32 (warp) for bit0.
The next row (warp) is going to pick up bit1, etc
The first grid starts at byte 0 + blockIdx.x * 2048
The second grid starts at byte 8192/32/2
*/
smem[ threadIdx.y ][ threadIdx.x ] = in[ threadIdx.x + // Aligned loads. 32*4 = 128 bytes
threadIdx.y*64 + // Offset to next bit = 8192/32/4.
blockIdx.x*2048 + // Start of the block
blockIdx.y*32 ]; // Next 32 reads
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
v = smem[ threadIdx.x ][ threadIdx.y ];
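/* The ballot loop below performs a 32x32 bit transpose inside each warp: for bit i,
every lane votes with bit i of its own word v, and __ballot_sync packs those 32
votes into one word ordered by lane. After 32 iterations, output word i holds
bit i of all 32 input words, which is the bitshuffled layout for this tile. */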
#pragma unroll 32
for( int i = 0; i < 32; i++ )
smem[i][threadIdx.y] = __ballot_sync(0xFFFFFFFFU, v & (1U<<i) );
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
out[ threadIdx.x + threadIdx.y*32 + blockIdx.y*1024 + blockIdx.x*2048 ] = smem[threadIdx.x][threadIdx.y];
}
__global__ void shuf_8192_16(const uint32_t * __restrict__ in,
uint32_t * __restrict__ out ){
/*
grid 32x32 threads
each thread loads 4 bytes (aligned) = 128 bytes per row of 32
total bytes loaded = 32x32x4 = 4096 bytes
x y z
blocks = ( total_bytes / 8192, 2, 1 )
*/
__shared__ uint32_t smem[32][33];
/* This thread is going to load 4 bytes. Next thread in x will load
the next 4 to be aligned. In total we pick up 32*4 = 128 bytes in this
row of 32 (warp) for bit0.
The next row (warp) is going to pick up bit1, etc
The first grid starts at byte 0 + blockIdx.x * 2048
The second grid starts at byte 8192/32/2
*/
int itemx = threadIdx.x / 16;
int shft = 16*itemx;
uint32_t v, mask = 0xFFFFU << shft ;
smem[ threadIdx.y ][ threadIdx.x ] = in[ threadIdx.x + // Aligned loads. 32*4 = 128 bytes = 1024 bits
( threadIdx.y / 16 ) * 32 + // end of the threadIdx.x reads
( threadIdx.y % 16 ) * 128 + // position of the next bit
blockIdx.x*2048 + // Start of the block
blockIdx.y*64 ] ; // Next 32*2 byte reads
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
/* (0-32)x4x8=1024 */
v = smem[ threadIdx.x ][ threadIdx.y ];
#pragma unroll 32
for( int i = 0; i < 32; i++ )
smem[i][threadIdx.y] = __ballot_sync(0xFFFFFFFFU, v & (1U<<i) );
__syncthreads();
/* output is [0, 1024] [1, 1025] [2,1026], ...
smem[i = 0 -> 1024 outputs][2 each]
output[x, y] = [2][1024]
*/
out[ threadIdx.x + shft * 31 + threadIdx.y*16 + blockIdx.y*1024 + blockIdx.x*2048 ] =
( ( smem[ 2 * (threadIdx.x%16) ][ threadIdx.y ] & mask ) >> shft ) |
( ( smem[ 2 * (threadIdx.x%16)+1 ][ threadIdx.y ] & mask ) << 16-shft );
}
__global__ void shuf_8192_8(const uint32_t * __restrict__ in,
uint32_t * __restrict__ out ){
/*
grid 32x32 threads
each thread loads 4 bytes (aligned) = 128 bytes per row of 32
total bytes loaded = 32x32x4 = 4096 bytes
x y z
blocks = ( total_bytes / 8192, 2, 1 )
*/
__shared__ uint32_t smem[32][33];
/* This thread is going to load 4 bytes. Next thread in x will load
the next 4 to be aligned. In total we pick up 32*4 = 128 bytes in this
row of 32 (warp) for bit0.
The next row (warp) is going to pick up bit1, etc
The first grid starts at byte 0 + blockIdx.x * 2048
The second grid starts at byte 8192/32/2
*/
uint32_t bitx = 4 *( threadIdx.x % 8 );
uint32_t bytx = threadIdx.x / 8;
uint32_t mask = 0xFFU << ( 8*bytx );
smem[ threadIdx.y ][ threadIdx.x ] = in[ threadIdx.x + // Aligned loads. 32*4 = 128 bytes = 1024 bits
(threadIdx.y/8) * 32 + // end of the threadIdx.x reads
(threadIdx.y%8) * 256 + // position of the next bit
blockIdx.x*2048 + // Start of the block
blockIdx.y*128 ] ; // Next 32*4 byte reads
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
/* (0-32)x4x8=1024 */
uint32_t v = smem[ threadIdx.x ][ threadIdx.y ];
#pragma unroll 32
for( int i = 0; i < 32; i++ )
smem[i][threadIdx.y] = __ballot_sync(0xFFFFFFFFU, v & (1U<<i) );
v = 0U;
__syncthreads();
/* output is [0, 1024, 2048, 4096]
smem[i = 0 -> 1024 outputs][4 each]
output[x, y] = [4][1024]
*/
switch (bytx){
case 0:
v |= ((smem[ bitx ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) << 8 ) ;
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) << 16 ) ;
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) << 24 ) ;
break;
case 1:
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) << 8 ) ;
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) << 16 ) ;
v |= ((smem[ bitx ][ threadIdx.y ] & mask) >> 8 ) ;
break;
case 2:
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) << 8 ) ;
v |= ((smem[ bitx ][ threadIdx.y ] & mask) >> 16 ) ;
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) >> 8 ) ;
break;
case 3:
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx ][ threadIdx.y ] & mask) >> 24 ) ;
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) >> 16 ) ;
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) >> 8 ) ;
break;
}
out[ threadIdx.x + (threadIdx.x/8) * 248 + threadIdx.y*8 + blockIdx.y*1024 + blockIdx.x*2048 ] = v;
}
__global__ void simple_shuffle(const uint8_t * __restrict__ in, uint8_t * __restrict__ out,
const uint32_t blocksize, const uint32_t total_bytes, const uint32_t elemsize ) {
// slow : do not use except for debugging
// 0-32 32
uint32_t dest = threadIdx.x + blockIdx.x * blockDim.x; // where to write output
// first input byte : bytes_per_elem==4
uint32_t block_id = (dest * elemsize) / blocksize; // which block is this block ?
uint32_t block_start = block_id * blocksize; // where did the block start ?
uint32_t nblocks = total_bytes / blocksize; // rounds down
uint32_t bsize = blocksize;
uint32_t tocopy = 0;
uint32_t elements_in_block = bsize / elemsize;
uint32_t position_in_block = dest % elements_in_block; // 0 -> 2048
int loop = 1;
if( block_id == nblocks ) { // this nmight not be a full length block.
bsize = total_bytes % blocksize;
tocopy = bsize % ( 8 * elemsize);
bsize -= tocopy;
elements_in_block = bsize / elemsize;
if( position_in_block >= elements_in_block ){
// this is a copy
for( int i = 0 ; i < elemsize ; i++ ){
out[ dest * elemsize + i ] = in[ dest * elemsize + i ];
}
loop = 0;
} else {
position_in_block = position_in_block % elements_in_block;
}
}
if (loop && block_id <= nblocks) {
const uint8_t * mybyte = in + block_start + ( position_in_block / 8 );
uint8_t mymask = 1U << (position_in_block % 8);
uint32_t bytestride = bsize / ( 8 * elemsize );
uint32_t myval = 0;
for( int i = 0 ; i < elemsize*8 ; i ++ ) { // grab my bits
if( (*mybyte & mymask) > 0 ) {
myval = myval | (1U << i);
}
mybyte = mybyte + bytestride;
}
for( int i = 0; i<elemsize ; i++){
out[dest * elemsize + i] = (uint8_t) ((myval)>>(8*i));
}
}
}
__global__ void simple_shuffle_end(const uint8_t * __restrict__ in, uint8_t * __restrict__ out,
const uint32_t blocksize, const uint32_t total_bytes, const uint32_t elemsize,
const uint32_t startpos
) {
// slow : do not use except for debugging
// 0-32 32
uint32_t dest = threadIdx.x + blockIdx.x * blockDim.x; // where to write output
// first input byte : bytes_per_elem==4
uint32_t block_id = (dest * elemsize) / blocksize; // which block is this block ?
uint32_t block_start = block_id * blocksize; // where did the block start ?
uint32_t nblocks = total_bytes / blocksize; // rounds down
uint32_t bsize = blocksize;
uint32_t tocopy = 0;
uint32_t elements_in_block = bsize / elemsize;
uint32_t position_in_block = dest % elements_in_block; // 0 -> 2048
int loop = 1;
if( block_id == nblocks ) { // this nmight not be a full length block.
bsize = total_bytes % blocksize;
tocopy = bsize % ( 8 * elemsize);
bsize -= tocopy;
elements_in_block = bsize / elemsize;
if( position_in_block >= elements_in_block ){
loop = 0;
} else {
position_in_block = position_in_block % elements_in_block;
}
}
if (loop && block_id <= nblocks) {
const uint8_t * mybyte = in + startpos + block_start + ( position_in_block / 8 );
uint8_t mymask = 1U << (position_in_block % 8);
uint32_t bytestride = bsize / ( 8 * elemsize );
uint32_t myval = 0;
for( int i = 0 ; i < elemsize*8 ; i ++ ) { // grab my bits
if( (*mybyte & mymask) > 0 ) {
myval = myval | (1U << i);
}
mybyte = mybyte + bytestride;
}
for( int i = 0; i<elemsize ; i++){
out[startpos + dest * elemsize + i] = (uint8_t) ((myval)>>(8*i));
}
}
}
| 7ac99ebaa02b2fbde8fd89de21810d812ef7c2b2.cu |
/* Bitshuffle cuda kernels aimed at hdf5 bslz4 format from the Dectris Eiger detectors.
Written by Jon Wright, ESRF, June 2021.
*/
#include <stdint.h>
__global__ void copybytes(uint8_t * src, uint32_t srcstart, uint8_t * dst, uint32_t dststart )
{
dst[threadIdx.x + dststart] = src[threadIdx.x+srcstart];
}
__global__ void shuf_8192_32(const uint32_t * __restrict__ in,
uint32_t * __restrict__ out ){
/*
grid 32x32 threads
each thread loads 4 bytes (aligned) = 128 bytes per row of 32
total bytes loaded = 32x32x4 = 4096 bytes
x y z
blocks = ( total_bytes / 8192, 2, 1 )
*/
__shared__ uint32_t smem[32][33];
uint32_t v;
/* This thread is going to load 4 bytes. Next thread in x will load
the next 4 to be aligned. In total we pick up 32*4 = 128 bytes in this
row of 32 (warp) for bit0.
The next row (warp) is going to pick up bit1, etc
The first grid starts at byte 0 + blockIdx.x * 2048
The second grid starts at byte 8192/32/2
*/
smem[ threadIdx.y ][ threadIdx.x ] = in[ threadIdx.x + // Aligned loads. 32*4 = 128 bytes
threadIdx.y*64 + // Offset to next bit = 8192/32/4.
blockIdx.x*2048 + // Start of the block
blockIdx.y*32 ]; // Next 32 reads
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
v = smem[ threadIdx.x ][ threadIdx.y ];
#pragma unroll 32
for( int i = 0; i < 32; i++ )
smem[i][threadIdx.y] = __ballot_sync(0xFFFFFFFFU, v & (1U<<i) );
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
out[ threadIdx.x + threadIdx.y*32 + blockIdx.y*1024 + blockIdx.x*2048 ] = smem[threadIdx.x][threadIdx.y];
}
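/* A minimal host-side launch sketch for the kernel above (not part of the
   original file; d_in, d_out and total_bytes are assumed device pointers and
   sizes). The geometry follows the comment at the top of the kernel:

     dim3 threads( 32, 32 );
     dim3 blocks( total_bytes / 8192, 2, 1 );
     shuf_8192_32<<< blocks, threads >>>( d_in, d_out );

   total_bytes is assumed to be a multiple of 8192 here; trailing bytes are
   presumably handled by the simple_shuffle* kernels further down. */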
__global__ void shuf_8192_16(const uint32_t * __restrict__ in,
uint32_t * __restrict__ out ){
/*
grid 32x32 threads
each thread loads 4 bytes (aligned) = 128 bytes per row of 32
total bytes loaded = 32x32x4 = 4096 bytes
x y z
blocks = ( total_bytes / 8192, 2, 1 )
*/
__shared__ uint32_t smem[32][33];
/* This thread is going to load 4 bytes. Next thread in x will load
the next 4 to be aligned. In total we pick up 32*4 = 128 bytes in this
row of 32 (warp) for bit0.
The next row (warp) is going to pick up bit1, etc
The first grid starts at byte 0 + blockIdx.x * 2048
The second grid starts at byte 8192/32/2
*/
int itemx = threadIdx.x / 16;
int shft = 16*itemx;
uint32_t v, mask = 0xFFFFU << shft ;
smem[ threadIdx.y ][ threadIdx.x ] = in[ threadIdx.x + // Aligned loads. 32*4 = 128 bytes = 1024 bits
( threadIdx.y / 16 ) * 32 + // end of the threadIdx.x reads
( threadIdx.y % 16 ) * 128 + // position of the next bit
blockIdx.x*2048 + // Start of the block
blockIdx.y*64 ] ; // Next 32*2 byte reads
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
/* (0-32)x4x8=1024 */
v = smem[ threadIdx.x ][ threadIdx.y ];
#pragma unroll 32
for( int i = 0; i < 32; i++ )
smem[i][threadIdx.y] = __ballot_sync(0xFFFFFFFFU, v & (1U<<i) );
__syncthreads();
/* output is [0, 1024] [1, 1025] [2,1026], ...
smem[i = 0 -> 1024 outputs][2 each]
output[x, y] = [2][1024]
*/
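/* Each thread packs the selected 16-bit half of two consecutive smem rows
   into the low and high halves of one 32-bit output word, using the mask
   computed above. */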
out[ threadIdx.x + shft * 31 + threadIdx.y*16 + blockIdx.y*1024 + blockIdx.x*2048 ] =
( ( smem[ 2 * (threadIdx.x%16) ][ threadIdx.y ] & mask ) >> shft ) |
( ( smem[ 2 * (threadIdx.x%16)+1 ][ threadIdx.y ] & mask ) << (16-shft) );
}
__global__ void shuf_8192_8(const uint32_t * __restrict__ in,
uint32_t * __restrict__ out ){
/*
grid 32x32 threads
each thread loads 4 bytes (aligned) = 128 bytes per row of 32
total bytes loaded = 32x32x4 = 4096 bytes
x y z
blocks = ( total_bytes / 8192, 2, 1 )
*/
__shared__ uint32_t smem[32][33];
/* This thread is going to load 4 bytes. Next thread in x will load
the next 4 to be aligned. In total we pick up 32*4 = 128 bytes in this
row of 32 (warp) for bit0.
The next row (warp) is going to pick up bit1, etc
The first grid starts at byte 0 + blockIdx.x * 2048
The second grid starts at byte 8192/32/2
*/
uint32_t bitx = 4 *( threadIdx.x % 8 );
uint32_t bytx = threadIdx.x / 8;
uint32_t mask = 0xFFU << ( 8*bytx );
smem[ threadIdx.y ][ threadIdx.x ] = in[ threadIdx.x + // Aligned loads. 32*4 = 128 bytes = 1024 bits
(threadIdx.y/8) * 32 + // end of the threadIdx.x reads
(threadIdx.y%8) * 256 + // position of the next bit
blockIdx.x*2048 + // Start of the block
blockIdx.y*128 ] ; // Next 32*4 byte reads
__syncthreads(); /* Now we loaded 4 kB to smem. Do the first level of transpose */
/* (0-32)x4x8=1024 */
uint32_t v = smem[ threadIdx.x ][ threadIdx.y ];
#pragma unroll 32
for( int i = 0; i < 32; i++ )
smem[i][threadIdx.y] = __ballot_sync(0xFFFFFFFFU, v & (1U<<i) );
v = 0U;
__syncthreads();
/* output is [0, 1024, 2048, 4096]
smem[i = 0 -> 1024 outputs][4 each]
output[x, y] = [4][1024]
*/
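/* Pack four masked bytes into one 32-bit output word; the switch picks the
   rotation that places this thread's byte lane (bytx) in the right position. */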
switch (bytx){
case 0:
v |= ((smem[ bitx ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) << 8 ) ;
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) << 16 ) ;
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) << 24 ) ;
break;
case 1:
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) << 8 ) ;
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) << 16 ) ;
v |= ((smem[ bitx ][ threadIdx.y ] & mask) >> 8 ) ;
break;
case 2:
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) << 8 ) ;
v |= ((smem[ bitx ][ threadIdx.y ] & mask) >> 16 ) ;
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) >> 8 ) ;
break;
case 3:
v |= ((smem[ bitx + 3 ][ threadIdx.y ] & mask) ) ;
v |= ((smem[ bitx ][ threadIdx.y ] & mask) >> 24 ) ;
v |= ((smem[ bitx + 1 ][ threadIdx.y ] & mask) >> 16 ) ;
v |= ((smem[ bitx + 2 ][ threadIdx.y ] & mask) >> 8 ) ;
break;
}
out[ threadIdx.x + (threadIdx.x/8) * 248 + threadIdx.y*8 + blockIdx.y*1024 + blockIdx.x*2048 ] = v;
}
__global__ void simple_shuffle(const uint8_t * __restrict__ in, uint8_t * __restrict__ out,
const uint32_t blocksize, const uint32_t total_bytes, const uint32_t elemsize ) {
// slow : do not use except for debugging
// 0-32 32
uint32_t dest = threadIdx.x + blockIdx.x * blockDim.x; // where to write output
// first input byte : bytes_per_elem==4
uint32_t block_id = (dest * elemsize) / blocksize; // which block is this block ?
uint32_t block_start = block_id * blocksize; // where did the block start ?
uint32_t nblocks = total_bytes / blocksize; // rounds down
uint32_t bsize = blocksize;
uint32_t tocopy = 0;
uint32_t elements_in_block = bsize / elemsize;
uint32_t position_in_block = dest % elements_in_block; // 0 -> 2048
int loop = 1;
if( block_id == nblocks ) { // this might not be a full-length block.
bsize = total_bytes % blocksize;
tocopy = bsize % ( 8 * elemsize);
bsize -= tocopy;
elements_in_block = bsize / elemsize;
if( position_in_block >= elements_in_block ){
// these trailing elements were not bit-shuffled: copy them through unchanged
for( int i = 0 ; i < elemsize ; i++ ){
out[ dest * elemsize + i ] = in[ dest * elemsize + i ];
}
loop = 0;
} else {
position_in_block = position_in_block % elements_in_block;
}
}
if (loop && block_id <= nblocks) {
const uint8_t * mybyte = in + block_start + ( position_in_block / 8 );
uint8_t mymask = 1U << (position_in_block % 8);
uint32_t bytestride = bsize / ( 8 * elemsize );
uint32_t myval = 0;
for( int i = 0 ; i < elemsize*8 ; i ++ ) { // grab my bits
if( (*mybyte & mymask) > 0 ) {
myval = myval | (1U << i);
}
mybyte = mybyte + bytestride;
}
for( int i = 0; i<elemsize ; i++){
out[dest * elemsize + i] = (uint8_t) ((myval)>>(8*i));
}
}
}
__global__ void simple_shuffle_end(const uint8_t * __restrict__ in, uint8_t * __restrict__ out,
const uint32_t blocksize, const uint32_t total_bytes, const uint32_t elemsize,
const uint32_t startpos
) {
// slow : do not use except for debugging
// 0-32 32
uint32_t dest = threadIdx.x + blockIdx.x * blockDim.x; // where to write output
// first input byte : bytes_per_elem==4
uint32_t block_id = (dest * elemsize) / blocksize; // which block is this block ?
uint32_t block_start = block_id * blocksize; // where did the block start ?
uint32_t nblocks = total_bytes / blocksize; // rounds down
uint32_t bsize = blocksize;
uint32_t tocopy = 0;
uint32_t elements_in_block = bsize / elemsize;
uint32_t position_in_block = dest % elements_in_block; // 0 -> 2048
int loop = 1;
if( block_id == nblocks ) { // this might not be a full-length block.
bsize = total_bytes % blocksize;
tocopy = bsize % ( 8 * elemsize);
bsize -= tocopy;
elements_in_block = bsize / elemsize;
if( position_in_block >= elements_in_block ){
loop = 0;
} else {
position_in_block = position_in_block % elements_in_block;
}
}
if (loop && block_id <= nblocks) {
const uint8_t * mybyte = in + startpos + block_start + ( position_in_block / 8 );
uint8_t mymask = 1U << (position_in_block % 8);
uint32_t bytestride = bsize / ( 8 * elemsize );
uint32_t myval = 0;
for( int i = 0 ; i < elemsize*8 ; i ++ ) { // grab my bits
if( (*mybyte & mymask) > 0 ) {
myval = myval | (1U << i);
}
mybyte = mybyte + bytestride;
}
for( int i = 0; i<elemsize ; i++){
out[startpos + dest * elemsize + i] = (uint8_t) ((myval)>>(8*i));
}
}
}
|
73464be9ee85105752972f2ad519d5a1a339611f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//#define NVBIO_ENABLE_PROFILING
#define MOD_NAMESPACE
#define MOD_NAMESPACE_BEGIN namespace bowtie2 { namespace driver {
#define MOD_NAMESPACE_END }}
#define MOD_NAMESPACE_NAME bowtie2::driver
#include <nvBowtie/bowtie2/cuda/compute_thread.h>
#include <nvBowtie/bowtie2/cuda/defs.h>
#include <nvBowtie/bowtie2/cuda/fmindex_def.h>
#include <nvBowtie/bowtie2/cuda/params.h>
#include <nvBowtie/bowtie2/cuda/stats.h>
#include <nvBowtie/bowtie2/cuda/persist.h>
#include <nvBowtie/bowtie2/cuda/scoring.h>
#include <nvBowtie/bowtie2/cuda/mapq.h>
#include <nvBowtie/bowtie2/cuda/aligner.h>
#include <nvBowtie/bowtie2/cuda/aligner_inst.h>
#include <nvBowtie/bowtie2/cuda/input_thread.h>
#include <nvbio/basic/cuda/arch.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/options.h>
#include <nvbio/basic/threads.h>
#include <nvbio/basic/atomics.h>
#include <nvbio/basic/html.h>
#include <nvbio/basic/version.h>
#include <nvbio/fmindex/bwt.h>
#include <nvbio/fmindex/ssa.h>
#include <nvbio/fmindex/fmindex.h>
#include <nvbio/fmindex/fmindex_device.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <algorithm>
#include <numeric>
#include <functional>
namespace nvbio {
namespace bowtie2 {
namespace cuda {
ComputeThreadSE::ComputeThreadSE(
const uint32 _thread_id,
const uint32 _device_id,
const io::SequenceData& _reference_data,
const io::FMIndexData& _driver_data,
const std::map<std::string,std::string>& _options,
const Params& _params,
Stats& _stats) :
thread_id( _thread_id ),
device_id( _device_id ),
reference_data_host( _reference_data ),
driver_data_host( _driver_data ),
options( _options ),
input_thread( NULL ),
output_file( NULL ),
params( _params ),
stats( _stats )
{
log_visible(stderr, "[%u] nvBowtie cuda driver created on device %u\n", thread_id, device_id);
// initialize the selected device
hipSetDevice( device_id );
hipSetDeviceFlags( hipDeviceMapHost | hipDeviceLmemResizeToMax );
aligner = SharedPointer<Aligner>( new Aligner() );
Timer timer;
timer.start();
const bool need_reverse =
(params.allow_sub == 0 && USE_REVERSE_INDEX) ||
(params.allow_sub == 1 && params.subseed_len == 0 && params.mode == BestMappingApprox);
reference_data_device.reset( new io::SequenceDataDevice( reference_data_host ) );
driver_data_device.reset( new io::FMIndexDataDevice( driver_data_host,
io::FMIndexDataDevice::FORWARD |
(need_reverse ? io::FMIndexDataDevice::REVERSE : 0u) |
io::FMIndexDataDevice::SA ) );
timer.stop();
log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data_device->allocated()) / 1.0e9f, timer.seconds() );
}
// gauge the favourite batch size
//
uint32 ComputeThreadSE::gauge_batch_size()
{
// switch to the selected device
hipSetDevice( device_id );
uint32 BATCH_SIZE;
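// try successively halved batch sizes until init_alloc accepts one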
for (BATCH_SIZE = params.max_batch_size*1024; BATCH_SIZE >= 16*1024; BATCH_SIZE /= 2)
{
std::pair<uint64,uint64> mem_stats;
// gauge how much memory we'd need
if (aligner->init_alloc( BATCH_SIZE, params, kSingleEnd, false, &mem_stats ) == true)
{
log_stats(stderr, "[%u] estimated allocation sizes: HOST %lu MB, DEVICE %lu MB)\n",
thread_id,
mem_stats.first / (1024*1024),
mem_stats.second / (1024*1024) );
break;
}
}
return BATCH_SIZE;
}
void ComputeThreadSE::do_run()
{
log_visible(stderr, "[%u] nvBowtie cuda driver... started\n", thread_id);
// switch to the selected device
hipSetDevice( device_id );
// build an empty report
FILE* html_output = (params.report != std::string("")) ? fopen( params.report.c_str(), "w" ) : NULL;
if (html_output)
{
// encapsulate the document
{
html::html_object html( html_output );
{
const char* meta_list = "<meta http-equiv=\"refresh\" content=\"1\" />";
{ html::header_object hd( html_output, "Bowtie2 Report", html::style(), meta_list ); }
{ html::body_object body( html_output ); }
}
}
fclose( html_output );
}
Timer timer;
io::SequenceDataDevice& reference_data = *reference_data_device.get();
io::FMIndexDataDevice& driver_data = *driver_data_device.get();
log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data.allocated()) / 1.0e9f, timer.seconds() );
typedef FMIndexDef::type fm_index_type;
fm_index_type fmi = driver_data.index();
fm_index_type rfmi = driver_data.rindex();
size_t free, total;
hipMemGetInfo(&free, &total);
log_stats(stderr, "[%u] device has %ld of %ld MB free\n", thread_id, free/1024/1024, total/1024/1024);
const uint32 BATCH_SIZE = input_thread->batch_size();
log_stats(stderr, "[%u] processing reads in batches of %uK\n", thread_id, BATCH_SIZE/1024);
// setup the output file
aligner->output_file = output_file;
// initialize the aligner
if (aligner->init( thread_id, BATCH_SIZE, params, kSingleEnd ) == false)
return;
nvbio::cuda::check_error("cuda initializations");
hipMemGetInfo(&free, &total);
log_stats(stderr, "[%u] ready to start processing: device has %ld MB free\n", thread_id, free/1024/1024);
Timer global_timer;
global_timer.start();
UberScoringScheme& scoring_scheme = params.scoring_scheme;
uint32 n_reads = 0;
io::SequenceDataHost local_read_data_host;
io::HostOutputBatchSE local_output_batch_host;
// loop through the batches of reads
while (1)
{
uint32 read_begin;
Timer io_timer;
io_timer.start();
io::SequenceDataHost* read_data_host = input_thread->next( &read_begin );
io_timer.stop();
stats.read_io.add( read_data_host ? read_data_host->size() : 0u, io_timer.seconds() );
if (read_data_host == NULL)
{
log_verbose(stderr, "[%u] end of input reached\n", thread_id);
break;
}
if (read_data_host->max_sequence_len() > Aligner::MAX_READ_LEN)
{
log_error(stderr, "[%u] unsupported read length %u (maximum is %u)\n", thread_id,
read_data_host->max_sequence_len(),
Aligner::MAX_READ_LEN );
break;
}
// make a local copy of the host batch
local_read_data_host = *read_data_host;
// mark this set as ready to be reused
input_thread->release( read_data_host );
Timer timer;
timer.start();
//aligner.output_file->start_batch( &local_read_data_host );
local_output_batch_host.read_data = &local_read_data_host;
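// upload the batch to the device (accounted for in the read_HtoD timing below)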
io::SequenceDataDevice read_data( local_read_data_host );
hipDeviceSynchronize();
timer.stop();
stats.read_HtoD.add( read_data.size(), timer.seconds() );
const uint32 count = read_data.size();
log_info(stderr, "[%u] aligning reads [%u, %u]\n", thread_id, read_begin, read_begin + count - 1u);
log_verbose(stderr, "[%u] %u reads\n", thread_id, count);
log_verbose(stderr, "[%u] %.3f M bps (%.1f MB)\n", thread_id, float(read_data.bps())/1.0e6f, float(read_data.words()*sizeof(uint32)+read_data.bps()*sizeof(char))/float(1024*1024));
log_verbose(stderr, "[%u] %.1f bps/read (min: %u, max: %u)\n", thread_id, float(read_data.bps())/float(read_data.size()), read_data.min_sequence_len(), read_data.max_sequence_len());
if (params.mode == AllMapping)
{
if (params.scoring_mode == EditDistanceMode)
{
all_ed(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
else
{
all_sw(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
}
else
{
if (params.scoring_mode == EditDistanceMode)
{
best_approx_ed(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
else
{
best_approx_sw(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
}
global_timer.stop();
stats.global_time += global_timer.seconds();
global_timer.start();
//aligner->output_file->end_batch();
// increase the total reads counter
n_reads += count;
log_verbose(stderr, "[%u] %.1f K reads/s\n", thread_id, 1.0e-3f * float(n_reads) / stats.global_time);
}
global_timer.stop();
stats.global_time += global_timer.seconds();
stats.n_reads = n_reads;
if (params.report.length())
nvbio::bowtie2::cuda::generate_device_report( thread_id, stats, stats.mate1, params.report.c_str() );
log_visible(stderr, "[%u] nvBowtie cuda driver... done\n", thread_id);
log_stats(stderr, "[%u] total : %.2f sec (avg: %.1fK reads/s).\n", thread_id, stats.global_time, 1.0e-3f * float(n_reads)/stats.global_time);
log_stats(stderr, "[%u] mapping : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.map.time, 1.0e-6f * stats.map.avg_speed(), 1.0e-6f * stats.map.max_speed, stats.map.device_time);
log_stats(stderr, "[%u] selecting : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.select.time, 1.0e-6f * stats.select.avg_speed(), 1.0e-6f * stats.select.max_speed, stats.select.device_time);
log_stats(stderr, "[%u] sorting : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.sort.time, 1.0e-6f * stats.sort.avg_speed(), 1.0e-6f * stats.sort.max_speed, stats.sort.device_time);
log_stats(stderr, "[%u] scoring : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.score.time, 1.0e-6f * stats.score.avg_speed(), 1.0e-6f * stats.score.max_speed, stats.score.device_time);
log_stats(stderr, "[%u] locating : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.locate.time, 1.0e-6f * stats.locate.avg_speed(), 1.0e-6f * stats.locate.max_speed, stats.locate.device_time);
log_stats(stderr, "[%u] backtracking : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack.time, 1.0e-6f * stats.backtrack.avg_speed(), 1.0e-6f * stats.backtrack.max_speed, stats.backtrack.device_time);
log_stats(stderr, "[%u] finalizing : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.finalize.time, 1.0e-6f * stats.finalize.avg_speed(), 1.0e-6f * stats.finalize.max_speed, stats.finalize.device_time);
log_stats(stderr, "[%u] results DtoH : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.alignments_DtoH.time, 1.0e-6f * stats.alignments_DtoH.avg_speed(), 1.0e-6f * stats.alignments_DtoH.max_speed);
log_stats(stderr, "[%u] results I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.io.time, 1.0e-6f * stats.io.avg_speed(), 1.0e-6f * stats.io.max_speed);
log_stats(stderr, "[%u] reads HtoD : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_HtoD.time, 1.0e-6f * stats.read_HtoD.avg_speed(), 1.0e-6f * stats.read_HtoD.max_speed);
log_stats(stderr, "[%u] reads I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_io.time, 1.0e-6f * stats.read_io.avg_speed(), 1.0e-6f * stats.read_io.max_speed);
}
void ComputeThreadSE::run()
{
try {
do_run();
}
catch (nvbio::cuda_error &e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc &e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error &e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error &e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc &e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error &e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error &e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
}
}
ComputeThreadPE::ComputeThreadPE(
const uint32 _thread_id,
const uint32 _device_id,
const io::SequenceData& _reference_data,
const io::FMIndexData& _driver_data,
const std::map<std::string,std::string>& _options,
const Params& _params,
Stats& _stats) :
thread_id( _thread_id ),
device_id( _device_id ),
reference_data_host( _reference_data ),
driver_data_host( _driver_data ),
options( _options ),
input_thread( NULL ),
output_file( NULL ),
params( _params ),
stats( _stats )
{
log_visible(stderr, "[%u] nvBowtie cuda driver created on device %u\n", thread_id, device_id);
// initialize the selected device
hipSetDevice( device_id );
hipSetDeviceFlags( hipDeviceMapHost | hipDeviceLmemResizeToMax );
aligner = SharedPointer<Aligner>( new Aligner() );
Timer timer;
timer.start();
const bool need_reverse =
(params.allow_sub == 0 && USE_REVERSE_INDEX) ||
(params.allow_sub == 1 && params.subseed_len == 0 && params.mode == BestMappingApprox);
reference_data_device.reset( new io::SequenceDataDevice( reference_data_host ) );
driver_data_device.reset( new io::FMIndexDataDevice( driver_data_host,
io::FMIndexDataDevice::FORWARD |
(need_reverse ? io::FMIndexDataDevice::REVERSE : 0u) |
io::FMIndexDataDevice::SA ) );
timer.stop();
log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data_device->allocated()) / 1.0e9f, timer.seconds() );
}
// gauge the favourite batch size
//
uint32 ComputeThreadPE::gauge_batch_size()
{
// switch to the selected device
hipSetDevice( device_id );
uint32 BATCH_SIZE;
for (BATCH_SIZE = params.max_batch_size*1024; BATCH_SIZE >= 16*1024; BATCH_SIZE /= 2)
{
std::pair<uint64,uint64> mem_stats;
// gauge how much memory we'd need
if (aligner->init_alloc( BATCH_SIZE, params, kPairedEnds, false, &mem_stats ) == true)
{
log_stats(stderr, "[%u] estimated allocation sizes: HOST %lu MB, DEVICE %lu MB)\n",
thread_id,
mem_stats.first / (1024*1024),
mem_stats.second / (1024*1024) );
break;
}
}
return BATCH_SIZE;
}
void ComputeThreadPE::do_run()
{
log_visible(stderr, "[%u] nvBowtie cuda driver... started\n", thread_id);
// switch to the selected device
hipSetDevice( device_id );
// build an empty report
FILE* html_output = (params.report != std::string("")) ? fopen( params.report.c_str(), "w" ) : NULL;
if (html_output)
{
// encapsulate the document
{
html::html_object html( html_output );
{
const char* meta_list = "<meta http-equiv=\"refresh\" content=\"1\" />";
{ html::header_object hd( html_output, "Bowtie2 Report", html::style(), meta_list ); }
{ html::body_object body( html_output ); }
}
}
fclose( html_output );
}
Timer timer;
io::SequenceDataDevice& reference_data = *reference_data_device.get();
io::FMIndexDataDevice& driver_data = *driver_data_device.get();
typedef FMIndexDef::type fm_index_type;
fm_index_type fmi = driver_data.index();
fm_index_type rfmi = driver_data.rindex();
size_t free, total;
hipMemGetInfo(&free, &total);
log_stats(stderr, "[%u] device has %ld of %ld MB free\n", thread_id, free/1024/1024, total/1024/1024);
const uint32 BATCH_SIZE = input_thread->batch_size();
log_stats(stderr, "[%u] processing reads in batches of %uK\n", thread_id, BATCH_SIZE/1024);
// setup the output file
aligner->output_file = output_file;
// initialize the aligner
if (aligner->init( thread_id, BATCH_SIZE, params, kPairedEnds ) == false)
return;
nvbio::cuda::check_error("cuda initializations");
hipMemGetInfo(&free, &total);
log_stats(stderr, "[%u] ready to start processing: device has %ld MB free\n", thread_id, free/1024/1024);
size_t stack_size_limit;
hipDeviceGetLimit( &stack_size_limit, hipLimitStackSize );
log_debug(stderr, "[%u] max cuda stack size: %zu\n", thread_id, stack_size_limit);
Timer global_timer;
global_timer.start();
UberScoringScheme& scoring_scheme = params.scoring_scheme;
uint32 n_reads = 0;
io::SequenceDataHost local_read_data_host1;
io::SequenceDataHost local_read_data_host2;
io::HostOutputBatchPE local_output_batch_host;
// loop through the batches of reads
while (1)
{
uint32 read_begin;
Timer io_timer;
io_timer.start();
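// fetch the next pair of mate batches from the input thread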
std::pair<io::SequenceDataHost*,io::SequenceDataHost*> read_data_host_pair = input_thread->next( &read_begin );
io::SequenceDataHost* read_data_host1 = read_data_host_pair.first;
io::SequenceDataHost* read_data_host2 = read_data_host_pair.second;
io_timer.stop();
stats.read_io.add( read_data_host1 ? read_data_host1->size() : 0u, io_timer.seconds() );
if (read_data_host1 == NULL ||
read_data_host2 == NULL)
{
log_verbose(stderr, "[%u] end of input reached\n", thread_id);
break;
}
if ((read_data_host1->max_sequence_len() > Aligner::MAX_READ_LEN) ||
(read_data_host2->max_sequence_len() > Aligner::MAX_READ_LEN))
{
log_error(stderr, "[%u] unsupported read length %u (maximum is %u)\n",
thread_id,
nvbio::max(read_data_host1->max_sequence_len(), read_data_host2->max_sequence_len()),
Aligner::MAX_READ_LEN );
break;
}
// make a local copy of the host batch
local_read_data_host1 = *read_data_host1;
local_read_data_host2 = *read_data_host2;
// mark this set as ready to be reused
input_thread->release( read_data_host_pair );
Timer timer;
timer.start();
//aligner.output_file->start_batch( &local_read_data_host1, &local_read_data_host2 );
local_output_batch_host.read_data[0] = &local_read_data_host1;
local_output_batch_host.read_data[1] = &local_read_data_host2;
io::SequenceDataDevice read_data1( local_read_data_host1/*, io::ReadDataDevice::READS | io::ReadDataDevice::QUALS*/ );
io::SequenceDataDevice read_data2( local_read_data_host2/*, io::ReadDataDevice::READS | io::ReadDataDevice::QUALS*/ );
timer.stop();
stats.read_HtoD.add( read_data1.size(), timer.seconds() );
const uint32 count = read_data1.size();
log_info(stderr, "[%u] aligning reads [%u, %u]\n", thread_id, read_begin, read_begin + count - 1u);
log_verbose(stderr, "[%u] %u reads\n", thread_id, count);
log_verbose(stderr, "[%u] %.3f M bps (%.1f MB)\n", thread_id,
float(read_data1.bps() + read_data2.bps())/1.0e6f,
float(read_data1.words()*sizeof(uint32)+read_data1.bps()*sizeof(char))/float(1024*1024)+
float(read_data2.words()*sizeof(uint32)+read_data2.bps()*sizeof(char))/float(1024*1024));
log_verbose(stderr, "[%u] %.1f bps/read (min: %u, max: %u)\n", thread_id,
float(read_data1.bps()+read_data2.bps())/float(read_data1.size()+read_data2.size()),
nvbio::min( read_data1.min_sequence_len(), read_data2.min_sequence_len() ),
nvbio::max( read_data1.max_sequence_len(), read_data2.max_sequence_len() ));
if (params.mode == AllMapping)
{
log_error(stderr, "[%u] paired-end all-mapping is not yet supported!\n", thread_id);
exit(1);
}
else
{
if (params.scoring_mode == EditDistanceMode)
{
best_approx_ed(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data1,
read_data2,
local_output_batch_host,
stats );
}
else
{
best_approx_sw(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data1,
read_data2,
local_output_batch_host,
stats );
}
}
global_timer.stop();
stats.global_time += global_timer.seconds();
global_timer.start();
//aligner.output_file->end_batch();
// increase the total reads counter
n_reads += count;
log_verbose(stderr, "[%u] %.1f K reads/s\n", thread_id, 1.0e-3f * float(n_reads) / stats.global_time);
}
global_timer.stop();
stats.global_time += global_timer.seconds();
if (params.report.length())
nvbio::bowtie2::cuda::generate_device_report( thread_id, stats, stats.concordant, params.report.c_str() );
log_visible(stderr, "[%u] nvBowtie cuda driver... done\n", thread_id);
log_stats(stderr, "[%u] total : %.2f sec (avg: %.1fK reads/s).\n", thread_id, stats.global_time, 1.0e-3f * float(n_reads)/stats.global_time);
log_stats(stderr, "[%u] mapping : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.map.time, 1.0e-6f * stats.map.avg_speed(), 1.0e-6f * stats.map.max_speed, stats.map.device_time);
log_stats(stderr, "[%u] scoring : %.2f sec (avg: %.1fM reads/s, max: %.3fM reads/s, %.2f device sec).).\n", thread_id, stats.scoring_pipe.time, 1.0e-6f * stats.scoring_pipe.avg_speed(), 1.0e-6f * stats.scoring_pipe.max_speed, stats.scoring_pipe.device_time);
log_stats(stderr, "[%u] selecting : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.select.time, 1.0e-6f * stats.select.avg_speed(), 1.0e-6f * stats.select.max_speed, stats.select.device_time);
log_stats(stderr, "[%u] sorting : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.sort.time, 1.0e-6f * stats.sort.avg_speed(), 1.0e-6f * stats.sort.max_speed, stats.sort.device_time);
log_stats(stderr, "[%u] scoring(a) : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.score.time, 1.0e-6f * stats.score.avg_speed(), 1.0e-6f * stats.score.max_speed, stats.score.device_time);
log_stats(stderr, "[%u] scoring(o) : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.opposite_score.time, 1.0e-6f * stats.opposite_score.avg_speed(), 1.0e-6f * stats.opposite_score.max_speed, stats.opposite_score.device_time);
log_stats(stderr, "[%u] locating : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.locate.time, 1.0e-6f * stats.locate.avg_speed(), 1.0e-6f * stats.locate.max_speed, stats.locate.device_time);
log_stats(stderr, "[%u] backtracing(a) : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack.time, 1.0e-6f * stats.backtrack.avg_speed(), 1.0e-6f * stats.backtrack.max_speed, stats.backtrack.device_time);
log_stats(stderr, "[%u] backtracing(o) : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack_opposite.time, 1.0e-6f * stats.backtrack_opposite.avg_speed(), 1.0e-6f * stats.backtrack_opposite.max_speed, stats.backtrack_opposite.device_time);
log_stats(stderr, "[%u] finalizing : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.finalize.time, 1.0e-6f * stats.finalize.avg_speed(), 1.0e-6f * stats.finalize.max_speed, stats.finalize.device_time);
log_stats(stderr, "[%u] results DtoH : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.alignments_DtoH.time, 1.0e-6f * stats.alignments_DtoH.avg_speed(), 1.0e-6f * stats.alignments_DtoH.max_speed);
log_stats(stderr, "[%u] results I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.io.time, 1.0e-6f * stats.io.avg_speed(), 1.0e-6f * stats.io.max_speed);
log_stats(stderr, "[%u] reads HtoD : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_HtoD.time, 1.0e-6f * stats.read_HtoD.avg_speed(), 1.0e-6f * stats.read_HtoD.max_speed);
log_stats(stderr, "[%u] reads I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_io.time, 1.0e-6f * stats.read_io.avg_speed(), 1.0e-6f * stats.read_io.max_speed);
}
void ComputeThreadPE::run()
{
try {
do_run();
}
catch (nvbio::cuda_error &e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc &e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error &e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error &e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc &e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error &e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error &e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
}
}
} // namespace cuda
} // namespace bowtie2
} // namespace nvbio
| 73464be9ee85105752972f2ad519d5a1a339611f.cu | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//#define NVBIO_ENABLE_PROFILING
#define MOD_NAMESPACE
#define MOD_NAMESPACE_BEGIN namespace bowtie2 { namespace driver {
#define MOD_NAMESPACE_END }}
#define MOD_NAMESPACE_NAME bowtie2::driver
#include <nvBowtie/bowtie2/cuda/compute_thread.h>
#include <nvBowtie/bowtie2/cuda/defs.h>
#include <nvBowtie/bowtie2/cuda/fmindex_def.h>
#include <nvBowtie/bowtie2/cuda/params.h>
#include <nvBowtie/bowtie2/cuda/stats.h>
#include <nvBowtie/bowtie2/cuda/persist.h>
#include <nvBowtie/bowtie2/cuda/scoring.h>
#include <nvBowtie/bowtie2/cuda/mapq.h>
#include <nvBowtie/bowtie2/cuda/aligner.h>
#include <nvBowtie/bowtie2/cuda/aligner_inst.h>
#include <nvBowtie/bowtie2/cuda/input_thread.h>
#include <nvbio/basic/cuda/arch.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/options.h>
#include <nvbio/basic/threads.h>
#include <nvbio/basic/atomics.h>
#include <nvbio/basic/html.h>
#include <nvbio/basic/version.h>
#include <nvbio/fmindex/bwt.h>
#include <nvbio/fmindex/ssa.h>
#include <nvbio/fmindex/fmindex.h>
#include <nvbio/fmindex/fmindex_device.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <algorithm>
#include <numeric>
#include <functional>
namespace nvbio {
namespace bowtie2 {
namespace cuda {
ComputeThreadSE::ComputeThreadSE(
const uint32 _thread_id,
const uint32 _device_id,
const io::SequenceData& _reference_data,
const io::FMIndexData& _driver_data,
const std::map<std::string,std::string>& _options,
const Params& _params,
Stats& _stats) :
thread_id( _thread_id ),
device_id( _device_id ),
reference_data_host( _reference_data ),
driver_data_host( _driver_data ),
options( _options ),
input_thread( NULL ),
output_file( NULL ),
params( _params ),
stats( _stats )
{
log_visible(stderr, "[%u] nvBowtie cuda driver created on device %u\n", thread_id, device_id);
// initialize the selected device
cudaSetDevice( device_id );
cudaSetDeviceFlags( cudaDeviceMapHost | cudaDeviceLmemResizeToMax );
aligner = SharedPointer<Aligner>( new Aligner() );
Timer timer;
timer.start();
const bool need_reverse =
(params.allow_sub == 0 && USE_REVERSE_INDEX) ||
(params.allow_sub == 1 && params.subseed_len == 0 && params.mode == BestMappingApprox);
reference_data_device.reset( new io::SequenceDataDevice( reference_data_host ) );
driver_data_device.reset( new io::FMIndexDataDevice( driver_data_host,
io::FMIndexDataDevice::FORWARD |
(need_reverse ? io::FMIndexDataDevice::REVERSE : 0u) |
io::FMIndexDataDevice::SA ) );
timer.stop();
log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data_device->allocated()) / 1.0e9f, timer.seconds() );
}
// gauge the favourite batch size
//
uint32 ComputeThreadSE::gauge_batch_size()
{
// switch to the selected device
cudaSetDevice( device_id );
uint32 BATCH_SIZE;
for (BATCH_SIZE = params.max_batch_size*1024; BATCH_SIZE >= 16*1024; BATCH_SIZE /= 2)
{
std::pair<uint64,uint64> mem_stats;
// gauge how much memory we'd need
if (aligner->init_alloc( BATCH_SIZE, params, kSingleEnd, false, &mem_stats ) == true)
{
log_stats(stderr, "[%u] estimated allocation sizes: HOST %lu MB, DEVICE %lu MB)\n",
thread_id,
mem_stats.first / (1024*1024),
mem_stats.second / (1024*1024) );
break;
}
}
return BATCH_SIZE;
}
void ComputeThreadSE::do_run()
{
log_visible(stderr, "[%u] nvBowtie cuda driver... started\n", thread_id);
// switch to the selected device
cudaSetDevice( device_id );
// build an empty report
FILE* html_output = (params.report != std::string("")) ? fopen( params.report.c_str(), "w" ) : NULL;
if (html_output)
{
// encapsulate the document
{
html::html_object html( html_output );
{
const char* meta_list = "<meta http-equiv=\"refresh\" content=\"1\" />";
{ html::header_object hd( html_output, "Bowtie2 Report", html::style(), meta_list ); }
{ html::body_object body( html_output ); }
}
}
fclose( html_output );
}
Timer timer;
io::SequenceDataDevice& reference_data = *reference_data_device.get();
io::FMIndexDataDevice& driver_data = *driver_data_device.get();
log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data.allocated()) / 1.0e9f, timer.seconds() );
typedef FMIndexDef::type fm_index_type;
fm_index_type fmi = driver_data.index();
fm_index_type rfmi = driver_data.rindex();
size_t free, total;
cudaMemGetInfo(&free, &total);
log_stats(stderr, "[%u] device has %ld of %ld MB free\n", thread_id, free/1024/1024, total/1024/1024);
const uint32 BATCH_SIZE = input_thread->batch_size();
log_stats(stderr, "[%u] processing reads in batches of %uK\n", thread_id, BATCH_SIZE/1024);
// setup the output file
aligner->output_file = output_file;
// initialize the aligner
if (aligner->init( thread_id, BATCH_SIZE, params, kSingleEnd ) == false)
return;
nvbio::cuda::check_error("cuda initializations");
cudaMemGetInfo(&free, &total);
log_stats(stderr, "[%u] ready to start processing: device has %ld MB free\n", thread_id, free/1024/1024);
Timer global_timer;
global_timer.start();
UberScoringScheme& scoring_scheme = params.scoring_scheme;
uint32 n_reads = 0;
io::SequenceDataHost local_read_data_host;
io::HostOutputBatchSE local_output_batch_host;
// loop through the batches of reads
while (1)
{
uint32 read_begin;
Timer io_timer;
io_timer.start();
io::SequenceDataHost* read_data_host = input_thread->next( &read_begin );
io_timer.stop();
stats.read_io.add( read_data_host ? read_data_host->size() : 0u, io_timer.seconds() );
if (read_data_host == NULL)
{
log_verbose(stderr, "[%u] end of input reached\n", thread_id);
break;
}
if (read_data_host->max_sequence_len() > Aligner::MAX_READ_LEN)
{
log_error(stderr, "[%u] unsupported read length %u (maximum is %u)\n", thread_id,
read_data_host->max_sequence_len(),
Aligner::MAX_READ_LEN );
break;
}
// make a local copy of the host batch
local_read_data_host = *read_data_host;
// mark this set as ready to be reused
input_thread->release( read_data_host );
Timer timer;
timer.start();
//aligner.output_file->start_batch( &local_read_data_host );
local_output_batch_host.read_data = &local_read_data_host;
io::SequenceDataDevice read_data( local_read_data_host );
cudaThreadSynchronize();
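// cudaThreadSynchronize() is deprecated in newer CUDA releases;
// cudaDeviceSynchronize() is the usual replacement.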
timer.stop();
stats.read_HtoD.add( read_data.size(), timer.seconds() );
const uint32 count = read_data.size();
log_info(stderr, "[%u] aligning reads [%u, %u]\n", thread_id, read_begin, read_begin + count - 1u);
log_verbose(stderr, "[%u] %u reads\n", thread_id, count);
log_verbose(stderr, "[%u] %.3f M bps (%.1f MB)\n", thread_id, float(read_data.bps())/1.0e6f, float(read_data.words()*sizeof(uint32)+read_data.bps()*sizeof(char))/float(1024*1024));
log_verbose(stderr, "[%u] %.1f bps/read (min: %u, max: %u)\n", thread_id, float(read_data.bps())/float(read_data.size()), read_data.min_sequence_len(), read_data.max_sequence_len());
if (params.mode == AllMapping)
{
if (params.scoring_mode == EditDistanceMode)
{
all_ed(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
else
{
all_sw(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
}
else
{
if (params.scoring_mode == EditDistanceMode)
{
best_approx_ed(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
else
{
best_approx_sw(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data,
local_output_batch_host,
stats );
}
}
global_timer.stop();
stats.global_time += global_timer.seconds();
global_timer.start();
//aligner->output_file->end_batch();
// increase the total reads counter
n_reads += count;
log_verbose(stderr, "[%u] %.1f K reads/s\n", thread_id, 1.0e-3f * float(n_reads) / stats.global_time);
}
global_timer.stop();
stats.global_time += global_timer.seconds();
stats.n_reads = n_reads;
if (params.report.length())
nvbio::bowtie2::cuda::generate_device_report( thread_id, stats, stats.mate1, params.report.c_str() );
log_visible(stderr, "[%u] nvBowtie cuda driver... done\n", thread_id);
log_stats(stderr, "[%u] total : %.2f sec (avg: %.1fK reads/s).\n", thread_id, stats.global_time, 1.0e-3f * float(n_reads)/stats.global_time);
log_stats(stderr, "[%u] mapping : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.map.time, 1.0e-6f * stats.map.avg_speed(), 1.0e-6f * stats.map.max_speed, stats.map.device_time);
log_stats(stderr, "[%u] selecting : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.select.time, 1.0e-6f * stats.select.avg_speed(), 1.0e-6f * stats.select.max_speed, stats.select.device_time);
log_stats(stderr, "[%u] sorting : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.sort.time, 1.0e-6f * stats.sort.avg_speed(), 1.0e-6f * stats.sort.max_speed, stats.sort.device_time);
log_stats(stderr, "[%u] scoring : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.score.time, 1.0e-6f * stats.score.avg_speed(), 1.0e-6f * stats.score.max_speed, stats.score.device_time);
log_stats(stderr, "[%u] locating : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.locate.time, 1.0e-6f * stats.locate.avg_speed(), 1.0e-6f * stats.locate.max_speed, stats.locate.device_time);
log_stats(stderr, "[%u] backtracking : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack.time, 1.0e-6f * stats.backtrack.avg_speed(), 1.0e-6f * stats.backtrack.max_speed, stats.backtrack.device_time);
log_stats(stderr, "[%u] finalizing : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.finalize.time, 1.0e-6f * stats.finalize.avg_speed(), 1.0e-6f * stats.finalize.max_speed, stats.finalize.device_time);
log_stats(stderr, "[%u] results DtoH : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.alignments_DtoH.time, 1.0e-6f * stats.alignments_DtoH.avg_speed(), 1.0e-6f * stats.alignments_DtoH.max_speed);
log_stats(stderr, "[%u] results I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.io.time, 1.0e-6f * stats.io.avg_speed(), 1.0e-6f * stats.io.max_speed);
log_stats(stderr, "[%u] reads HtoD : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_HtoD.time, 1.0e-6f * stats.read_HtoD.avg_speed(), 1.0e-6f * stats.read_HtoD.max_speed);
log_stats(stderr, "[%u] reads I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_io.time, 1.0e-6f * stats.read_io.avg_speed(), 1.0e-6f * stats.read_io.max_speed);
}
void ComputeThreadSE::run()
{
try {
do_run();
}
catch (nvbio::cuda_error &e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc &e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error &e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error &e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc &e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error &e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error &e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
}
}
ComputeThreadPE::ComputeThreadPE(
const uint32 _thread_id,
const uint32 _device_id,
const io::SequenceData& _reference_data,
const io::FMIndexData& _driver_data,
const std::map<std::string,std::string>& _options,
const Params& _params,
Stats& _stats) :
thread_id( _thread_id ),
device_id( _device_id ),
reference_data_host( _reference_data ),
driver_data_host( _driver_data ),
options( _options ),
input_thread( NULL ),
output_file( NULL ),
params( _params ),
stats( _stats )
{
log_visible(stderr, "[%u] nvBowtie cuda driver created on device %u\n", thread_id, device_id);
// initialize the selected device
cudaSetDevice( device_id );
cudaSetDeviceFlags( cudaDeviceMapHost | cudaDeviceLmemResizeToMax );
aligner = SharedPointer<Aligner>( new Aligner() );
Timer timer;
timer.start();
const bool need_reverse =
(params.allow_sub == 0 && USE_REVERSE_INDEX) ||
(params.allow_sub == 1 && params.subseed_len == 0 && params.mode == BestMappingApprox);
reference_data_device.reset( new io::SequenceDataDevice( reference_data_host ) );
driver_data_device.reset( new io::FMIndexDataDevice( driver_data_host,
io::FMIndexDataDevice::FORWARD |
(need_reverse ? io::FMIndexDataDevice::REVERSE : 0u) |
io::FMIndexDataDevice::SA ) );
timer.stop();
log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data_device->allocated()) / 1.0e9f, timer.seconds() );
}
// gauge the favourite batch size
//
uint32 ComputeThreadPE::gauge_batch_size()
{
// switch to the selected device
cudaSetDevice( device_id );
uint32 BATCH_SIZE;
for (BATCH_SIZE = params.max_batch_size*1024; BATCH_SIZE >= 16*1024; BATCH_SIZE /= 2)
{
std::pair<uint64,uint64> mem_stats;
// gauge how much memory we'd need
if (aligner->init_alloc( BATCH_SIZE, params, kPairedEnds, false, &mem_stats ) == true)
{
log_stats(stderr, "[%u] estimated allocation sizes: HOST %lu MB, DEVICE %lu MB)\n",
thread_id,
mem_stats.first / (1024*1024),
mem_stats.second / (1024*1024) );
break;
}
}
return BATCH_SIZE;
}
void ComputeThreadPE::do_run()
{
log_visible(stderr, "[%u] nvBowtie cuda driver... started\n", thread_id);
// switch to the selected device
cudaSetDevice( device_id );
// build an empty report
FILE* html_output = (params.report != std::string("")) ? fopen( params.report.c_str(), "w" ) : NULL;
if (html_output)
{
// encapsulate the document
{
html::html_object html( html_output );
{
const char* meta_list = "<meta http-equiv=\"refresh\" content=\"1\" />";
{ html::header_object hd( html_output, "Bowtie2 Report", html::style(), meta_list ); }
{ html::body_object body( html_output ); }
}
}
fclose( html_output );
}
Timer timer;
io::SequenceDataDevice& reference_data = *reference_data_device.get();
io::FMIndexDataDevice& driver_data = *driver_data_device.get();
typedef FMIndexDef::type fm_index_type;
fm_index_type fmi = driver_data.index();
fm_index_type rfmi = driver_data.rindex();
size_t free, total;
cudaMemGetInfo(&free, &total);
log_stats(stderr, "[%u] device has %ld of %ld MB free\n", thread_id, free/1024/1024, total/1024/1024);
const uint32 BATCH_SIZE = input_thread->batch_size();
log_stats(stderr, "[%u] processing reads in batches of %uK\n", thread_id, BATCH_SIZE/1024);
// setup the output file
aligner->output_file = output_file;
// initialize the aligner
if (aligner->init( thread_id, BATCH_SIZE, params, kPairedEnds ) == false)
return;
nvbio::cuda::check_error("cuda initializations");
cudaMemGetInfo(&free, &total);
log_stats(stderr, "[%u] ready to start processing: device has %ld MB free\n", thread_id, free/1024/1024);
size_t stack_size_limit;
cudaDeviceGetLimit( &stack_size_limit, cudaLimitStackSize );
log_debug(stderr, "[%u] max cuda stack size: %zu\n", thread_id, stack_size_limit);
Timer global_timer;
global_timer.start();
UberScoringScheme& scoring_scheme = params.scoring_scheme;
uint32 n_reads = 0;
io::SequenceDataHost local_read_data_host1;
io::SequenceDataHost local_read_data_host2;
io::HostOutputBatchPE local_output_batch_host;
// loop through the batches of reads
while (1)
{
uint32 read_begin;
Timer io_timer;
io_timer.start();
std::pair<io::SequenceDataHost*,io::SequenceDataHost*> read_data_host_pair = input_thread->next( &read_begin );
io::SequenceDataHost* read_data_host1 = read_data_host_pair.first;
io::SequenceDataHost* read_data_host2 = read_data_host_pair.second;
io_timer.stop();
stats.read_io.add( read_data_host1 ? read_data_host1->size() : 0u, io_timer.seconds() );
if (read_data_host1 == NULL ||
read_data_host2 == NULL)
{
log_verbose(stderr, "[%u] end of input reached\n", thread_id);
break;
}
if ((read_data_host1->max_sequence_len() > Aligner::MAX_READ_LEN) ||
(read_data_host2->max_sequence_len() > Aligner::MAX_READ_LEN))
{
log_error(stderr, "[%u] unsupported read length %u (maximum is %u)\n",
thread_id,
nvbio::max(read_data_host1->max_sequence_len(), read_data_host2->max_sequence_len()),
Aligner::MAX_READ_LEN );
break;
}
// make a local copy of the host batch
local_read_data_host1 = *read_data_host1;
local_read_data_host2 = *read_data_host2;
// mark this set as ready to be reused
input_thread->release( read_data_host_pair );
Timer timer;
timer.start();
//aligner.output_file->start_batch( &local_read_data_host1, &local_read_data_host2 );
local_output_batch_host.read_data[0] = &local_read_data_host1;
local_output_batch_host.read_data[1] = &local_read_data_host2;
io::SequenceDataDevice read_data1( local_read_data_host1/*, io::ReadDataDevice::READS | io::ReadDataDevice::QUALS*/ );
io::SequenceDataDevice read_data2( local_read_data_host2/*, io::ReadDataDevice::READS | io::ReadDataDevice::QUALS*/ );
timer.stop();
stats.read_HtoD.add( read_data1.size(), timer.seconds() );
const uint32 count = read_data1.size();
log_info(stderr, "[%u] aligning reads [%u, %u]\n", thread_id, read_begin, read_begin + count - 1u);
log_verbose(stderr, "[%u] %u reads\n", thread_id, count);
log_verbose(stderr, "[%u] %.3f M bps (%.1f MB)\n", thread_id,
float(read_data1.bps() + read_data2.bps())/1.0e6f,
float(read_data1.words()*sizeof(uint32)+read_data1.bps()*sizeof(char))/float(1024*1024)+
float(read_data2.words()*sizeof(uint32)+read_data2.bps()*sizeof(char))/float(1024*1024));
log_verbose(stderr, "[%u] %.1f bps/read (min: %u, max: %u)\n", thread_id,
float(read_data1.bps()+read_data2.bps())/float(read_data1.size()+read_data2.size()),
nvbio::min( read_data1.min_sequence_len(), read_data2.min_sequence_len() ),
nvbio::max( read_data1.max_sequence_len(), read_data2.max_sequence_len() ));
if (params.mode == AllMapping)
{
log_error(stderr, "[%u] paired-end all-mapping is not yet supported!\n", thread_id);
exit(1);
}
else
{
if (params.scoring_mode == EditDistanceMode)
{
best_approx_ed(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data1,
read_data2,
local_output_batch_host,
stats );
}
else
{
best_approx_sw(
*aligner,
params,
fmi,
rfmi,
scoring_scheme,
reference_data,
driver_data,
read_data1,
read_data2,
local_output_batch_host,
stats );
}
}
global_timer.stop();
stats.global_time += global_timer.seconds();
global_timer.start();
//aligner.output_file->end_batch();
// increase the total reads counter
n_reads += count;
log_verbose(stderr, "[%u] %.1f K reads/s\n", thread_id, 1.0e-3f * float(n_reads) / stats.global_time);
}
global_timer.stop();
stats.global_time += global_timer.seconds();
if (params.report.length())
nvbio::bowtie2::cuda::generate_device_report( thread_id, stats, stats.concordant, params.report.c_str() );
log_visible(stderr, "[%u] nvBowtie cuda driver... done\n", thread_id);
log_stats(stderr, "[%u] total : %.2f sec (avg: %.1fK reads/s).\n", thread_id, stats.global_time, 1.0e-3f * float(n_reads)/stats.global_time);
log_stats(stderr, "[%u] mapping : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.map.time, 1.0e-6f * stats.map.avg_speed(), 1.0e-6f * stats.map.max_speed, stats.map.device_time);
log_stats(stderr, "[%u] scoring : %.2f sec (avg: %.1fM reads/s, max: %.3fM reads/s, %.2f device sec).).\n", thread_id, stats.scoring_pipe.time, 1.0e-6f * stats.scoring_pipe.avg_speed(), 1.0e-6f * stats.scoring_pipe.max_speed, stats.scoring_pipe.device_time);
log_stats(stderr, "[%u] selecting : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.select.time, 1.0e-6f * stats.select.avg_speed(), 1.0e-6f * stats.select.max_speed, stats.select.device_time);
log_stats(stderr, "[%u] sorting : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.sort.time, 1.0e-6f * stats.sort.avg_speed(), 1.0e-6f * stats.sort.max_speed, stats.sort.device_time);
log_stats(stderr, "[%u] scoring(a) : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.score.time, 1.0e-6f * stats.score.avg_speed(), 1.0e-6f * stats.score.max_speed, stats.score.device_time);
log_stats(stderr, "[%u] scoring(o) : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.opposite_score.time, 1.0e-6f * stats.opposite_score.avg_speed(), 1.0e-6f * stats.opposite_score.max_speed, stats.opposite_score.device_time);
log_stats(stderr, "[%u] locating : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.locate.time, 1.0e-6f * stats.locate.avg_speed(), 1.0e-6f * stats.locate.max_speed, stats.locate.device_time);
log_stats(stderr, "[%u] backtracing(a) : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack.time, 1.0e-6f * stats.backtrack.avg_speed(), 1.0e-6f * stats.backtrack.max_speed, stats.backtrack.device_time);
log_stats(stderr, "[%u] backtracing(o) : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack_opposite.time, 1.0e-6f * stats.backtrack_opposite.avg_speed(), 1.0e-6f * stats.backtrack_opposite.max_speed, stats.backtrack_opposite.device_time);
log_stats(stderr, "[%u] finalizing : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.finalize.time, 1.0e-6f * stats.finalize.avg_speed(), 1.0e-6f * stats.finalize.max_speed, stats.finalize.device_time);
log_stats(stderr, "[%u] results DtoH : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.alignments_DtoH.time, 1.0e-6f * stats.alignments_DtoH.avg_speed(), 1.0e-6f * stats.alignments_DtoH.max_speed);
log_stats(stderr, "[%u] results I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.io.time, 1.0e-6f * stats.io.avg_speed(), 1.0e-6f * stats.io.max_speed);
log_stats(stderr, "[%u] reads HtoD : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_HtoD.time, 1.0e-6f * stats.read_HtoD.avg_speed(), 1.0e-6f * stats.read_HtoD.max_speed);
log_stats(stderr, "[%u] reads I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_io.time, 1.0e-6f * stats.read_io.avg_speed(), 1.0e-6f * stats.read_io.max_speed);
}
void ComputeThreadPE::run()
{
try {
do_run();
}
catch (nvbio::cuda_error &e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc &e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error &e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error &e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc &e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error &e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error &e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
}
}
} // namespace cuda
} // namespace bowtie2
} // namespace nvbio
|
e42e04248b57afbff273339377050fc9933a2622.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Academic License - for use in teaching, academic research, and meeting
// course requirements at degree granting institutions only. Not for
// government, commercial, or other organizational use.
// File: colourSegmentation.cu
//
// GPU Coder version : 2.0
// CUDA/C/C++ source code generated on : 05-Nov-2020 09:20:00
//
// Include Files
#include "colourSegmentation.h"
#include "MWCudaDimUtility.hpp"
#include "MWLaunchParametersUtilities.hpp"
// Type Definitions
struct cell_wrap_7
{
double f1[9];
};
// Function Declarations
static __global__ void colourSegmentation_kernel1(const unsigned char
colourBalancedImage[2184480], double Xin[2184480]);
static __global__ void colourSegmentation_kernel10(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel11(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel12(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel13(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel14(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel15(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel16(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel17(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel18(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel19(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel2(const double Xin[2184480],
double HSVImage[2184480]);
static __global__ void colourSegmentation_kernel20(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel21(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel22(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel23(const bool centerColourMask
[23409], cell_wrap_7 outputs[1]);
static __global__ void colourSegmentation_kernel24(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel25(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel26(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel27(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel28(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel29(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel3(double HSVImage[2184480]);
static __global__ void colourSegmentation_kernel30(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel31(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel32(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel4(bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel5(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel6(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel7(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel8(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel9(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
// Function Definitions
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const unsigned char colourBalancedImage[2184480]
// double Xin[2184480]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel1(
const unsigned char colourBalancedImage[2184480], double Xin[2184480])
{
int ix;
ix = static_cast<int>(mwGetGlobalThreadIndex());
if (ix < 2184480) {
// Input:
// colourBalancedImage = RGB image 984x740x3
    // centerOfObjectX = Object's center position in the x-axis
    // centerOfObjectY = Object's center position in the y-axis
// Output:
// colourSegmentationMask = bitmask of the segmented object
// Transform the Image into the HSV colour space
Xin[ix] = static_cast<double>(colourBalancedImage[ix]) / 255.0;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel10(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 5202] = HSVImage_data[xpageoffset
+ 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel11(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 70.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 155.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 70.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel12(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 7803] = HSVImage_data[xpageoffset
+ 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel13(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 155.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 205.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 155.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel14(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 10404] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel15(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 205.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 260.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 205.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel16(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 13005] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel17(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 260.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 315.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 260.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel18(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 15606] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel19(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 1456320]
> 0.6)) || (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] == 0.6)))) && (static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] <
0.2)) || (static_cast<int>(HSVImage[((i + xpageoffset) +
984 * (i2 + ix)) + 728160] == 0.2)))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double Xin[2184480]
// double HSVImage[2184480]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel2(
const double Xin[2184480], double HSVImage[2184480])
{
double varargin_1[3];
double h;
double tmp;
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
double d1;
double d2;
double delta;
double v;
int i;
d = Xin[xpageoffset + 984 * ix];
varargin_1[0] = d;
d1 = Xin[(xpageoffset + 984 * ix) + 728160];
varargin_1[1] = d1;
d2 = Xin[(xpageoffset + 984 * ix) + 1456320];
varargin_1[2] = d2;
v = d;
for (i = 0; i < 2; i++) {
if (v < varargin_1[i + 1]) {
v = varargin_1[i + 1];
}
}
varargin_1[0] = d;
varargin_1[1] = d1;
varargin_1[2] = d2;
tmp = d;
for (i = 0; i < 2; i++) {
if (tmp > varargin_1[i + 1]) {
tmp = varargin_1[i + 1];
}
}
delta = v - tmp;
tmp = delta;
if (delta == 0.0) {
tmp = 1.0;
}
h = 0.0;
if (d == v) {
h = (d1 - d2) / tmp;
}
if (d1 == v) {
h = (d2 - d) / tmp + 2.0;
}
if (d2 == v) {
h = (d - d1) / tmp + 4.0;
}
h /= 6.0;
if (h < 0.0) {
h++;
}
tmp /= v;
if (delta == 0.0) {
h = 0.0;
tmp = 0.0;
}
if (!static_cast<int>(v != 0.0)) {
tmp = 0.0;
}
HSVImage[xpageoffset + 984 * ix] = h;
HSVImage[(xpageoffset + 984 * ix) + 728160] = tmp;
HSVImage[(xpageoffset + 984 * ix) + 1456320] = v;
}
}
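//
// Worked example (illustrative only, not part of the generated code): for an
// input pixel with (d, d1, d2) = (1.0, 0.0, 0.0), i.e. pure red, the code
// above finds v = 1.0 and a minimum of 0.0, so delta = tmp = 1.0,
// h = (d1 - d2)/tmp/6.0 = 0.0 and the final tmp = delta/v = 1.0; the kernel
// stores (H, S, V) = (0.0, 1.0, 1.0), and colourSegmentation_kernel3 later
// scales that hue to 0 degrees.
//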
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel20(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 18207] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel21(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 1456320]
< 0.3)) || (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] == 0.3)))) || (static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 1456320]
< 0.6)) && (static_cast<int>((static_cast<int>(HSVImage
[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] < 0.2)) || (static_cast<
int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] == 0.2)))))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel22(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 20808] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool centerColourMask[23409]
// cell_wrap_7 outputs[1]
// Return Type : void
//
static __global__ __launch_bounds__(32, 1) void colourSegmentation_kernel23(
const bool centerColourMask[23409], cell_wrap_7 outputs[1])
{
double d;
int i;
i = static_cast<int>(mwGetGlobalThreadIndex());
if (i < 9) {
int xpageoffset;
// Sum up all the masks and then determine which mask has the most pixels
xpageoffset = i * 2601;
d = static_cast<double>(centerColourMask[xpageoffset]);
for (int ix = 0; ix < 2600; ix++) {
d += static_cast<double>(centerColourMask[(xpageoffset + ix) + 1]);
}
outputs[0].f1[i] = d;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel24(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
double d1;
d = HSVImage[(xpageoffset + 984 * ix) + 1456320];
d1 = HSVImage[(xpageoffset + 984 * ix) + 728160];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>(d < 0.3)) || (static_cast<int>(d == 0.3)))) || (
static_cast<int>((static_cast<int>(d < 0.6)) && (static_cast<int>((
static_cast<int>(d1 < 0.2)) || (static_cast<int>(d1 == 0.2)))))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel25(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
double d1;
d = HSVImage[(xpageoffset + 984 * ix) + 1456320];
d1 = HSVImage[(xpageoffset + 984 * ix) + 728160];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>(d > 0.6)) || (static_cast<int>(d == 0.6)))) && (
static_cast<int>((static_cast<int>(d1 < 0.2)) || (static_cast<int>(d1 ==
0.2)))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel26(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 260.0)) && (
static_cast<int>(d < 315.0)))) || (static_cast<int>(d == 260.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel27(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 205.0)) && (
static_cast<int>(d < 260.0)))) || (static_cast<int>(d == 205.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel28(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 155.0)) && (
static_cast<int>(d < 205.0)))) || (static_cast<int>(d == 155.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel29(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 70.0)) && (
static_cast<int>(d < 155.0)))) || (static_cast<int>(d == 70.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// double HSVImage[2184480]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel3
(double HSVImage[2184480])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
HSVImage[xpageoffset + 984 * ix] *= 360.0;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel30(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 50.0)) && (
static_cast<int>(d < 70.0)))) || (static_cast<int>(d == 50.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel31(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 15.0)) && (
static_cast<int>(d < 50.0)))) || (static_cast<int>(d == 15.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel32(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d < 15.0)) || (
static_cast<int>(d == 315.0)))) || (static_cast<int>(d > 315.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel4(bool
centerColourMask[23409])
{
int ix;
ix = static_cast<int>(mwGetGlobalThreadIndex());
if (ix < 23409) {
centerColourMask[ix] = false;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel5(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] < 15.0)) || (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] == 315.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] > 315.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel6(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[xpageoffset + 51 * ix] = HSVImage_data[xpageoffset + 51 *
ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel7(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 15.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 50.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 15.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel8(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 2601] = HSVImage_data[xpageoffset
+ 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel9(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 50.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 70.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 50.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// The function takes in an image and a rough position of the object in the
// image that should be segmented, and it outputs a bitmask of the object.
// Arguments : const unsigned char colourBalancedImage[2184480]
// double centerOfObjectX
// double centerOfObjectY
// bool colourSegmentationMask[728160]
// Return Type : void
//
void colourSegmentation(const unsigned char colourBalancedImage[2184480], double
centerOfObjectX, double centerOfObjectY, bool colourSegmentationMask[728160])
{
cell_wrap_7 outputs[1];
cell_wrap_7 (*gpu_outputs)[1];
dim3 b_block;
dim3 b_grid;
dim3 block;
dim3 c_block;
dim3 c_grid;
dim3 d_block;
dim3 d_grid;
dim3 e_block;
dim3 e_grid;
dim3 f_block;
dim3 f_grid;
dim3 g_block;
dim3 g_grid;
dim3 grid;
dim3 h_block;
dim3 h_grid;
dim3 i_block;
dim3 i_grid;
double (*gpu_HSVImage)[2184480];
double (*gpu_Xin)[2184480];
double ex;
int HSVImage_size[2];
int b_HSVImage_size[2];
int c_HSVImage_size[2];
int d_HSVImage_size[2];
int e_HSVImage_size[2];
int f_HSVImage_size[2];
int g_HSVImage_size[2];
int h_HSVImage_size[2];
int i_HSVImage_size[2];
int (*b_gpu_HSVImage_size)[2];
int (*c_gpu_HSVImage_size)[2];
int (*d_gpu_HSVImage_size)[2];
int (*e_gpu_HSVImage_size)[2];
int (*f_gpu_HSVImage_size)[2];
int (*g_gpu_HSVImage_size)[2];
int (*gpu_HSVImage_size)[2];
int (*h_gpu_HSVImage_size)[2];
int (*i_gpu_HSVImage_size)[2];
int b_i;
int b_i1;
int i;
int i1;
int i2;
int i3;
unsigned char (*gpu_colourBalancedImage)[2184480];
bool (*gpu_HSVImage_data)[728160];
bool (*gpu_colourSegmentationMask)[728160];
bool (*gpu_centerColourMask)[23409];
bool validLaunchParams;
hipMalloc(&gpu_colourSegmentationMask, 728160UL);
hipMalloc(&gpu_outputs, 72UL);
hipMalloc(&i_gpu_HSVImage_size, 8UL);
hipMalloc(&h_gpu_HSVImage_size, 8UL);
hipMalloc(&g_gpu_HSVImage_size, 8UL);
hipMalloc(&f_gpu_HSVImage_size, 8UL);
hipMalloc(&e_gpu_HSVImage_size, 8UL);
hipMalloc(&d_gpu_HSVImage_size, 8UL);
hipMalloc(&c_gpu_HSVImage_size, 8UL);
hipMalloc(&b_gpu_HSVImage_size, 8UL);
hipMalloc(&gpu_HSVImage_data, 728160UL);
hipMalloc(&gpu_centerColourMask, 23409UL);
hipMalloc(&gpu_HSVImage_size, 8UL);
hipMalloc(&gpu_HSVImage, 17475840UL);
hipMalloc(&gpu_Xin, 17475840UL);
hipMalloc(&gpu_colourBalancedImage, 2184480UL);
// Input:
// colourBalancedImage = RGB image 984x740x3
  // centerOfObjectX = Object's center position in the x-axis
  // centerOfObjectY = Object's center position in the y-axis
// Output:
// colourSegmentationMask = bitmask of the segmented object
// Transform the Image into the HSV colour space
hipMemcpy(gpu_colourBalancedImage, (void *)&colourBalancedImage[0], 2184480UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel1), dim3(dim3(4267U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_colourBalancedImage, *gpu_Xin);
hipLaunchKernelGGL(( colourSegmentation_kernel2), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_Xin, *gpu_HSVImage);
// Standardize the H-values from 0-1 to 0-360
hipLaunchKernelGGL(( colourSegmentation_kernel3), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage);
// Make a small square at the center of the object and determine which
// colour is most prevalent in the square
// Choose square size
// Calculate the start and end values for X and Y
  // Take out the square area from the image
if (centerOfObjectX - 25.0 > centerOfObjectX + 25.0) {
i = 0;
i1 = 0;
} else {
i = static_cast<int>(centerOfObjectX - 25.0) - 1;
i1 = static_cast<int>(centerOfObjectX + 25.0);
}
if (centerOfObjectY - 25.0 > centerOfObjectY + 25.0) {
i2 = 0;
i3 = 0;
} else {
i2 = static_cast<int>(centerOfObjectY - 25.0) - 1;
i3 = static_cast<int>(centerOfObjectY + 25.0);
}
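  // Worked example (illustrative only): with centerOfObjectX = 100.0 and
  // centerOfObjectY = 200.0 the else branches above give i = 74, i1 = 125,
  // i2 = 174 and i3 = 225, i.e. a 51x51 pixel window centred on the object,
  // matching the 51x51 slices written into centerColourMask by the kernels
  // below.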
hipLaunchKernelGGL(( colourSegmentation_kernel4), dim3(dim3(46U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_centerColourMask);
  // Make a mask in the square for each of the 9 colours.
  // Calculate which pixels are red
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
HSVImage_size[0] = b_i + 1;
HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &grid, &block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(gpu_HSVImage_size, &HSVImage_size[0], 8UL, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel5), dim3(grid), dim3(block), 0, 0, *gpu_HSVImage, i2, i,
*gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel6), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are orange
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
b_HSVImage_size[0] = b_i + 1;
b_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &b_grid, &b_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(b_gpu_HSVImage_size, &b_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel7), dim3(b_grid), dim3(b_block), 0, 0, *gpu_HSVImage, i2, i,
*b_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel8), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are yellow
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
c_HSVImage_size[0] = b_i + 1;
c_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &c_grid, &c_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(c_gpu_HSVImage_size, &c_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel9), dim3(c_grid), dim3(c_block), 0, 0, *gpu_HSVImage, i2, i,
*c_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel10), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are green
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
d_HSVImage_size[0] = b_i + 1;
d_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &d_grid, &d_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(d_gpu_HSVImage_size, &d_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel11), dim3(d_grid), dim3(d_block), 0, 0, *gpu_HSVImage, i2, i,
*d_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel12), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are cyan
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
e_HSVImage_size[0] = b_i + 1;
e_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &e_grid, &e_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(e_gpu_HSVImage_size, &e_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel13), dim3(e_grid), dim3(e_block), 0, 0, *gpu_HSVImage, i2, i,
*e_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel14), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are blue
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
f_HSVImage_size[0] = b_i + 1;
f_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &f_grid, &f_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(f_gpu_HSVImage_size, &f_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel15), dim3(f_grid), dim3(f_block), 0, 0, *gpu_HSVImage, i2, i,
*f_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel16), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are purple
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
g_HSVImage_size[0] = b_i + 1;
g_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &g_grid, &g_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(g_gpu_HSVImage_size, &g_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel17), dim3(g_grid), dim3(g_block), 0, 0, *gpu_HSVImage, i2, i,
*g_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel18), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are white
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
h_HSVImage_size[0] = b_i + 1;
h_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &h_grid, &h_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(h_gpu_HSVImage_size, &h_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel19), dim3(h_grid), dim3(h_block), 0, 0, *gpu_HSVImage, i2, i,
*h_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel20), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
  // Calculate which pixels are black
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
i_HSVImage_size[0] = b_i + 1;
i_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &i_grid, &i_block, 1024U, 65535U);
if (validLaunchParams) {
hipMemcpy(i_gpu_HSVImage_size, &i_HSVImage_size[0], 8UL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colourSegmentation_kernel21), dim3(i_grid), dim3(i_block), 0, 0, *gpu_HSVImage, i2, i,
*i_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
hipLaunchKernelGGL(( colourSegmentation_kernel22), dim3(dim3(6U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage_data, *gpu_centerColourMask);
// Sum up all the masks and then determine which mask has the most pixels
hipLaunchKernelGGL(( colourSegmentation_kernel23), dim3(dim3(1U, 1U, 1U)), dim3(dim3(32U, 1U, 1U)), 0, 0,
*gpu_centerColourMask, *gpu_outputs);
i1 = 1;
hipMemcpy(&outputs[0], gpu_outputs, 72UL, hipMemcpyDeviceToHost);
validLaunchParams = false;
ex = outputs[0].f1[0];
for (i = 0; i < 8; i++) {
if (validLaunchParams) {
hipMemcpy(&outputs[0], gpu_outputs, 72UL, hipMemcpyDeviceToHost);
validLaunchParams = false;
}
if (ex < outputs[0].f1[i + 1]) {
ex = outputs[0].f1[i + 1];
i1 = i + 2;
}
}
  // Make a mask over the whole image. The colour that is separated
// is the one that had the most pixels in the square
if (i1 == 1) {
// Red
hipLaunchKernelGGL(( colourSegmentation_kernel32), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 2) {
// Orange
hipLaunchKernelGGL(( colourSegmentation_kernel31), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 3) {
// Yellow
hipLaunchKernelGGL(( colourSegmentation_kernel30), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 4) {
// Green
hipLaunchKernelGGL(( colourSegmentation_kernel29), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 5) {
// Cyan
hipLaunchKernelGGL(( colourSegmentation_kernel28), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 6) {
// Blue
hipLaunchKernelGGL(( colourSegmentation_kernel27), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 7) {
// Purple
hipLaunchKernelGGL(( colourSegmentation_kernel26), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 8) {
// White
hipLaunchKernelGGL(( colourSegmentation_kernel25), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
} else {
// Black
hipLaunchKernelGGL(( colourSegmentation_kernel24), dim3(dim3(1423U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
*gpu_HSVImage, *gpu_colourSegmentationMask);
}
hipMemcpy(&colourSegmentationMask[0], gpu_colourSegmentationMask, 728160UL,
hipMemcpyDeviceToHost);
hipFree(*gpu_colourBalancedImage);
hipFree(*gpu_Xin);
hipFree(*gpu_HSVImage);
hipFree(*gpu_HSVImage_size);
hipFree(*gpu_centerColourMask);
hipFree(*gpu_HSVImage_data);
hipFree(*b_gpu_HSVImage_size);
hipFree(*c_gpu_HSVImage_size);
hipFree(*d_gpu_HSVImage_size);
hipFree(*e_gpu_HSVImage_size);
hipFree(*f_gpu_HSVImage_size);
hipFree(*g_gpu_HSVImage_size);
hipFree(*h_gpu_HSVImage_size);
hipFree(*i_gpu_HSVImage_size);
hipFree(*gpu_outputs);
hipFree(*gpu_colourSegmentationMask);
}
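//
// Illustrative usage sketch (not part of the GPU Coder output): a minimal
// host-side call of colourSegmentation, assuming statically allocated buffers
// that match the generated 984x740x3 planar layout. The buffer and function
// names below are hypothetical and exist only for illustration.
//
static unsigned char exampleColourImage[2184480]; // 984*740*3 planar RGB input
static bool exampleSegmentationMask[728160];      // 984*740 output bitmask
static void exampleColourSegmentationUsage()
{
  // Rough object centre in image coordinates (hypothetical values).
  colourSegmentation(exampleColourImage, 492.0, 370.0, exampleSegmentationMask);
}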
//
// File trailer for colourSegmentation.cu
//
// [EOF]
//
| e42e04248b57afbff273339377050fc9933a2622.cu | //
// Academic License - for use in teaching, academic research, and meeting
// course requirements at degree granting institutions only. Not for
// government, commercial, or other organizational use.
// File: colourSegmentation.cu
//
// GPU Coder version : 2.0
// CUDA/C/C++ source code generated on : 05-Nov-2020 09:20:00
//
// Include Files
#include "colourSegmentation.h"
#include "MWCudaDimUtility.hpp"
#include "MWLaunchParametersUtilities.hpp"
// Type Definitions
struct cell_wrap_7
{
double f1[9];
};
// Function Declarations
static __global__ void colourSegmentation_kernel1(const unsigned char
colourBalancedImage[2184480], double Xin[2184480]);
static __global__ void colourSegmentation_kernel10(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel11(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel12(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel13(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel14(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel15(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel16(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel17(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel18(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel19(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel2(const double Xin[2184480],
double HSVImage[2184480]);
static __global__ void colourSegmentation_kernel20(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel21(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel22(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel23(const bool centerColourMask
[23409], cell_wrap_7 outputs[1]);
static __global__ void colourSegmentation_kernel24(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel25(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel26(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel27(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel28(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel29(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel3(double HSVImage[2184480]);
static __global__ void colourSegmentation_kernel30(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel31(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel32(const double HSVImage[2184480],
bool colourSegmentationMask[728160]);
static __global__ void colourSegmentation_kernel4(bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel5(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel6(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel7(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
static __global__ void colourSegmentation_kernel8(const bool HSVImage_data
[728160], bool centerColourMask[23409]);
static __global__ void colourSegmentation_kernel9(const double HSVImage[2184480],
const int i2, const int i, const int HSVImage_size[2], const int b_i, const
int i1, bool HSVImage_data[728160]);
// Function Definitions
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const unsigned char colourBalancedImage[2184480]
// double Xin[2184480]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel1(
const unsigned char colourBalancedImage[2184480], double Xin[2184480])
{
int ix;
ix = static_cast<int>(mwGetGlobalThreadIndex());
if (ix < 2184480) {
// Input:
// colourBalancedImage = RGB image 984x740x3
    //  centerOfObjectX = Object's center position in the x-axis
    //  centerOfObjectY = Object's center position in the y-axis
// Output:
// colourSegmentationMask = bitmask of the segmented object
// Transform the Image into the HSV colour space
Xin[ix] = static_cast<double>(colourBalancedImage[ix]) / 255.0;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel10(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 5202] = HSVImage_data[xpageoffset
+ 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel11(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 70.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 155.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 70.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel12(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 7803] = HSVImage_data[xpageoffset
+ 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel13(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 155.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 205.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 155.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel14(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 10404] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel15(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 205.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 260.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 205.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel16(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 13005] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel17(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 260.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 315.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 260.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel18(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 15606] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel19(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 1456320]
> 0.6)) || (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] == 0.6)))) && (static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] <
0.2)) || (static_cast<int>(HSVImage[((i + xpageoffset) +
984 * (i2 + ix)) + 728160] == 0.2)))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double Xin[2184480]
// double HSVImage[2184480]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel2(
const double Xin[2184480], double HSVImage[2184480])
{
double varargin_1[3];
double h;
double tmp;
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
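  // The block below converts one RGB pixel to HSV using the standard
  // formulas: V = max(R,G,B); S = (V - min(R,G,B)) / V (0 when V == 0);
  // H is derived from whichever channel holds the maximum and is stored
  // normalised to [0, 1) here (kernel3 later rescales it to [0, 360)).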
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
double d1;
double d2;
double delta;
double v;
int i;
d = Xin[xpageoffset + 984 * ix];
varargin_1[0] = d;
d1 = Xin[(xpageoffset + 984 * ix) + 728160];
varargin_1[1] = d1;
d2 = Xin[(xpageoffset + 984 * ix) + 1456320];
varargin_1[2] = d2;
v = d;
for (i = 0; i < 2; i++) {
if (v < varargin_1[i + 1]) {
v = varargin_1[i + 1];
}
}
varargin_1[0] = d;
varargin_1[1] = d1;
varargin_1[2] = d2;
tmp = d;
for (i = 0; i < 2; i++) {
if (tmp > varargin_1[i + 1]) {
tmp = varargin_1[i + 1];
}
}
delta = v - tmp;
tmp = delta;
if (delta == 0.0) {
tmp = 1.0;
}
h = 0.0;
if (d == v) {
h = (d1 - d2) / tmp;
}
if (d1 == v) {
h = (d2 - d) / tmp + 2.0;
}
if (d2 == v) {
h = (d - d1) / tmp + 4.0;
}
h /= 6.0;
if (h < 0.0) {
h++;
}
tmp /= v;
if (delta == 0.0) {
h = 0.0;
tmp = 0.0;
}
if (!static_cast<int>(v != 0.0)) {
tmp = 0.0;
}
HSVImage[xpageoffset + 984 * ix] = h;
HSVImage[(xpageoffset + 984 * ix) + 728160] = tmp;
HSVImage[(xpageoffset + 984 * ix) + 1456320] = v;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel20(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 18207] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel21(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 1456320]
< 0.3)) || (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] == 0.3)))) || (static_cast<int>((
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 1456320]
< 0.6)) && (static_cast<int>((static_cast<int>(HSVImage
[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] < 0.2)) || (static_cast<
int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] == 0.2)))))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel22(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 20808] =
HSVImage_data[xpageoffset + 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool centerColourMask[23409]
// cell_wrap_7 outputs[1]
// Return Type : void
//
static __global__ __launch_bounds__(32, 1) void colourSegmentation_kernel23(
const bool centerColourMask[23409], cell_wrap_7 outputs[1])
{
double d;
int i;
i = static_cast<int>(mwGetGlobalThreadIndex());
if (i < 9) {
int xpageoffset;
// Sum up all the masks and then determine which mask has the most pixels
xpageoffset = i * 2601;
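    // Each of the nine colour masks occupies a 51x51 = 2601 element slice of
    // centerColourMask; the loop below counts the set pixels in slice i.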
d = static_cast<double>(centerColourMask[xpageoffset]);
for (int ix = 0; ix < 2600; ix++) {
d += static_cast<double>(centerColourMask[(xpageoffset + ix) + 1]);
}
outputs[0].f1[i] = d;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel24(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
double d1;
d = HSVImage[(xpageoffset + 984 * ix) + 1456320];
d1 = HSVImage[(xpageoffset + 984 * ix) + 728160];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>(d < 0.3)) || (static_cast<int>(d == 0.3)))) || (
static_cast<int>((static_cast<int>(d < 0.6)) && (static_cast<int>((
static_cast<int>(d1 < 0.2)) || (static_cast<int>(d1 == 0.2)))))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel25(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
double d1;
d = HSVImage[(xpageoffset + 984 * ix) + 1456320];
d1 = HSVImage[(xpageoffset + 984 * ix) + 728160];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>(d > 0.6)) || (static_cast<int>(d == 0.6)))) && (
static_cast<int>((static_cast<int>(d1 < 0.2)) || (static_cast<int>(d1 ==
0.2)))));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel26(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 260.0)) && (
static_cast<int>(d < 315.0)))) || (static_cast<int>(d == 260.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel27(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 205.0)) && (
static_cast<int>(d < 260.0)))) || (static_cast<int>(d == 205.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel28(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 155.0)) && (
static_cast<int>(d < 205.0)))) || (static_cast<int>(d == 155.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel29(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 70.0)) && (
static_cast<int>(d < 155.0)))) || (static_cast<int>(d == 70.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// double HSVImage[2184480]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel3
(double HSVImage[2184480])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
HSVImage[xpageoffset + 984 * ix] *= 360.0;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel30(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 50.0)) && (
static_cast<int>(d < 70.0)))) || (static_cast<int>(d == 50.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel31(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d > 15.0)) && (
static_cast<int>(d < 50.0)))) || (static_cast<int>(d == 15.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// bool colourSegmentationMask[728160]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel32(
const double HSVImage[2184480], bool colourSegmentationMask[728160])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 984UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
984UL);
if ((static_cast<int>(ix < 740)) && (static_cast<int>(xpageoffset < 984))) {
double d;
d = HSVImage[xpageoffset + 984 * ix];
colourSegmentationMask[xpageoffset + 984 * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(d < 15.0)) || (
static_cast<int>(d == 315.0)))) || (static_cast<int>(d > 315.0)))) && (
static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 728160] > 0.2)))) &&
(static_cast<int>(HSVImage[(xpageoffset + 984 * ix) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel4(bool
centerColourMask[23409])
{
int ix;
ix = static_cast<int>(mwGetGlobalThreadIndex());
if (ix < 23409) {
centerColourMask[ix] = false;
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel5(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] < 15.0)) || (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] == 315.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] > 315.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel6(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[xpageoffset + 51 * ix] = HSVImage_data[xpageoffset + 51 *
ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel7(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 15.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 50.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 15.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const bool HSVImage_data[728160]
// bool centerColourMask[23409]
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void colourSegmentation_kernel8(
const bool HSVImage_data[728160], bool centerColourMask[23409])
{
unsigned long threadId;
int ix;
int xpageoffset;
threadId = mwGetGlobalThreadIndex();
xpageoffset = static_cast<int>(threadId % 51UL);
ix = static_cast<int>((threadId - static_cast<unsigned long>(xpageoffset)) /
51UL);
if ((static_cast<int>(ix < 51)) && (static_cast<int>(xpageoffset < 51))) {
centerColourMask[(xpageoffset + 51 * ix) + 2601] = HSVImage_data[xpageoffset
+ 51 * ix];
}
}
//
// Arguments : dim3 blockArg
// dim3 gridArg
// const double HSVImage[2184480]
// const int i2
// const int i
// const int HSVImage_size[2]
// const int b_i
// const int i1
// bool HSVImage_data[728160]
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void colourSegmentation_kernel9(
const double HSVImage[2184480], const int i2, const int i, const int
HSVImage_size[2], const int b_i, const int i1, bool HSVImage_data[728160])
{
unsigned long idx;
long loopEnd;
unsigned long threadId;
unsigned long threadStride;
threadId = mwGetGlobalThreadIndex();
threadStride = mwGetTotalThreadsLaunched();
loopEnd = (static_cast<long>(b_i) + 1L) * (static_cast<long>(i1) + 1L) - 1L;
for (idx = threadId; idx <= static_cast<unsigned long>(loopEnd); idx +=
threadStride) {
int ix;
int xpageoffset;
xpageoffset = static_cast<int>(idx % (static_cast<unsigned long>(b_i) + 1UL));
ix = static_cast<int>((idx - static_cast<unsigned long>(xpageoffset)) / (
static_cast<unsigned long>(b_i) + 1UL));
HSVImage_data[xpageoffset + HSVImage_size[0] * ix] = ((static_cast<int>((
static_cast<int>((static_cast<int>((static_cast<int>(HSVImage[(i +
xpageoffset) + 984 * (i2 + ix)] > 50.0)) && (static_cast<int>(HSVImage[(i
+ xpageoffset) + 984 * (i2 + ix)] < 70.0)))) || (static_cast<int>
(HSVImage[(i + xpageoffset) + 984 * (i2 + ix)] == 50.0)))) && (
static_cast<int>(HSVImage[((i + xpageoffset) + 984 * (i2 + ix)) + 728160] >
0.2)))) && (static_cast<int>(HSVImage[((i + xpageoffset)
+ 984 * (i2 + ix)) + 1456320] > 0.3)));
}
}
//
// The function takes in an image and a rough position of the object in the
// image that should be segmented, and it outputs a bitmask of the object.
// Arguments : const unsigned char colourBalancedImage[2184480]
// double centerOfObjectX
// double centerOfObjectY
// bool colourSegmentationMask[728160]
// Return Type : void
//
void colourSegmentation(const unsigned char colourBalancedImage[2184480], double
centerOfObjectX, double centerOfObjectY, bool colourSegmentationMask[728160])
{
cell_wrap_7 outputs[1];
cell_wrap_7 (*gpu_outputs)[1];
dim3 b_block;
dim3 b_grid;
dim3 block;
dim3 c_block;
dim3 c_grid;
dim3 d_block;
dim3 d_grid;
dim3 e_block;
dim3 e_grid;
dim3 f_block;
dim3 f_grid;
dim3 g_block;
dim3 g_grid;
dim3 grid;
dim3 h_block;
dim3 h_grid;
dim3 i_block;
dim3 i_grid;
double (*gpu_HSVImage)[2184480];
double (*gpu_Xin)[2184480];
double ex;
int HSVImage_size[2];
int b_HSVImage_size[2];
int c_HSVImage_size[2];
int d_HSVImage_size[2];
int e_HSVImage_size[2];
int f_HSVImage_size[2];
int g_HSVImage_size[2];
int h_HSVImage_size[2];
int i_HSVImage_size[2];
int (*b_gpu_HSVImage_size)[2];
int (*c_gpu_HSVImage_size)[2];
int (*d_gpu_HSVImage_size)[2];
int (*e_gpu_HSVImage_size)[2];
int (*f_gpu_HSVImage_size)[2];
int (*g_gpu_HSVImage_size)[2];
int (*gpu_HSVImage_size)[2];
int (*h_gpu_HSVImage_size)[2];
int (*i_gpu_HSVImage_size)[2];
int b_i;
int b_i1;
int i;
int i1;
int i2;
int i3;
unsigned char (*gpu_colourBalancedImage)[2184480];
bool (*gpu_HSVImage_data)[728160];
bool (*gpu_colourSegmentationMask)[728160];
bool (*gpu_centerColourMask)[23409];
bool validLaunchParams;
cudaMalloc(&gpu_colourSegmentationMask, 728160UL);
cudaMalloc(&gpu_outputs, 72UL);
cudaMalloc(&i_gpu_HSVImage_size, 8UL);
cudaMalloc(&h_gpu_HSVImage_size, 8UL);
cudaMalloc(&g_gpu_HSVImage_size, 8UL);
cudaMalloc(&f_gpu_HSVImage_size, 8UL);
cudaMalloc(&e_gpu_HSVImage_size, 8UL);
cudaMalloc(&d_gpu_HSVImage_size, 8UL);
cudaMalloc(&c_gpu_HSVImage_size, 8UL);
cudaMalloc(&b_gpu_HSVImage_size, 8UL);
cudaMalloc(&gpu_HSVImage_data, 728160UL);
cudaMalloc(&gpu_centerColourMask, 23409UL);
cudaMalloc(&gpu_HSVImage_size, 8UL);
cudaMalloc(&gpu_HSVImage, 17475840UL);
cudaMalloc(&gpu_Xin, 17475840UL);
cudaMalloc(&gpu_colourBalancedImage, 2184480UL);
// Input:
// colourBalancedImage = RGB image 984x740x3
  //  centerOfObjectX = Object's center position in the x-axis
  //  centerOfObjectY = Object's center position in the y-axis
// Output:
// colourSegmentationMask = bitmask of the segmented object
// Transform the Image into the HSV colour space
cudaMemcpy(gpu_colourBalancedImage, (void *)&colourBalancedImage[0], 2184480UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel1<<<dim3(4267U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_colourBalancedImage, *gpu_Xin);
colourSegmentation_kernel2<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_Xin, *gpu_HSVImage);
// Standardize the H-values from 0-1 to 0-360
colourSegmentation_kernel3<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage);
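  // Launch geometry note: 4267 blocks x 512 threads >= 2184480 (= 984*740*3)
  // elements for the per-channel kernel1, and 1423 blocks x 512 threads
  // >= 728160 (= 984*740) pixels for the per-pixel kernels 2 and 3.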
// Make a small square at the center of the object and determine which
// colour is most prevalent in the square
// Choose square size
// Calculate the start and end values for X and Y
  //  Take out the square area from the image
if (centerOfObjectX - 25.0 > centerOfObjectX + 25.0) {
i = 0;
i1 = 0;
} else {
i = static_cast<int>(centerOfObjectX - 25.0) - 1;
i1 = static_cast<int>(centerOfObjectX + 25.0);
}
if (centerOfObjectY - 25.0 > centerOfObjectY + 25.0) {
i2 = 0;
i3 = 0;
} else {
i2 = static_cast<int>(centerOfObjectY - 25.0) - 1;
i3 = static_cast<int>(centerOfObjectY + 25.0);
}
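  // The index pairs (i, i1) and (i2, i3) now delimit a roughly 51x51 pixel
  // window centred on the supplied object position; the nine colour masks
  // below are evaluated only inside this window.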
colourSegmentation_kernel4<<<dim3(46U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_centerColourMask);
  //  Make a mask in the square for each of the 9 colours.
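  // Hue ranges used for the nine masks below (H in degrees after kernel3):
  //   red:    H < 15 or H >= 315     orange: 15 <= H < 50
  //   yellow: 50 <= H < 70           green:  70 <= H < 155
  //   cyan:   155 <= H < 205         blue:   205 <= H < 260
  //   purple: 260 <= H < 315         (each of these also needs S > 0.2, V > 0.3)
  //   white:  V >= 0.6 and S <= 0.2
  //   black:  V <= 0.3, or V < 0.6 and S <= 0.2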
  //  Calculate which pixels are red
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
HSVImage_size[0] = b_i + 1;
HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &grid, &block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(gpu_HSVImage_size, &HSVImage_size[0], 8UL, cudaMemcpyHostToDevice);
colourSegmentation_kernel5<<<grid, block>>>(*gpu_HSVImage, i2, i,
*gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel6<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are orange
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
b_HSVImage_size[0] = b_i + 1;
b_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &b_grid, &b_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(b_gpu_HSVImage_size, &b_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel7<<<b_grid, b_block>>>(*gpu_HSVImage, i2, i,
*b_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel8<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are yellow
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
c_HSVImage_size[0] = b_i + 1;
c_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &c_grid, &c_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(c_gpu_HSVImage_size, &c_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel9<<<c_grid, c_block>>>(*gpu_HSVImage, i2, i,
*c_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel10<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are green
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
d_HSVImage_size[0] = b_i + 1;
d_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &d_grid, &d_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(d_gpu_HSVImage_size, &d_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel11<<<d_grid, d_block>>>(*gpu_HSVImage, i2, i,
*d_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel12<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are cyan
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
e_HSVImage_size[0] = b_i + 1;
e_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &e_grid, &e_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(e_gpu_HSVImage_size, &e_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel13<<<e_grid, e_block>>>(*gpu_HSVImage, i2, i,
*e_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel14<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are blue
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
f_HSVImage_size[0] = b_i + 1;
f_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &f_grid, &f_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(f_gpu_HSVImage_size, &f_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel15<<<f_grid, f_block>>>(*gpu_HSVImage, i2, i,
*f_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel16<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are purple
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
g_HSVImage_size[0] = b_i + 1;
g_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &g_grid, &g_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(g_gpu_HSVImage_size, &g_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel17<<<g_grid, g_block>>>(*gpu_HSVImage, i2, i,
*g_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel18<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are white
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
h_HSVImage_size[0] = b_i + 1;
h_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &h_grid, &h_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(h_gpu_HSVImage_size, &h_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel19<<<h_grid, h_block>>>(*gpu_HSVImage, i2, i,
*h_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel20<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
  //  Calculate which pixels are black
b_i = (i1 - i) - 1;
b_i1 = (i3 - i2) - 1;
i_HSVImage_size[0] = b_i + 1;
i_HSVImage_size[1] = b_i1 + 1;
validLaunchParams = mwGetLaunchParameters(static_cast<double>((b_i + 1L) *
(b_i1 + 1L)), &i_grid, &i_block, 1024U, 65535U);
if (validLaunchParams) {
cudaMemcpy(i_gpu_HSVImage_size, &i_HSVImage_size[0], 8UL,
cudaMemcpyHostToDevice);
colourSegmentation_kernel21<<<i_grid, i_block>>>(*gpu_HSVImage, i2, i,
*i_gpu_HSVImage_size, b_i, b_i1, *gpu_HSVImage_data);
}
colourSegmentation_kernel22<<<dim3(6U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage_data, *gpu_centerColourMask);
// Sum up all the masks and then determine which mask has the most pixels
colourSegmentation_kernel23<<<dim3(1U, 1U, 1U), dim3(32U, 1U, 1U)>>>
(*gpu_centerColourMask, *gpu_outputs);
i1 = 1;
cudaMemcpy(&outputs[0], gpu_outputs, 72UL, cudaMemcpyDeviceToHost);
validLaunchParams = false;
ex = outputs[0].f1[0];
for (i = 0; i < 8; i++) {
if (validLaunchParams) {
cudaMemcpy(&outputs[0], gpu_outputs, 72UL, cudaMemcpyDeviceToHost);
validLaunchParams = false;
}
if (ex < outputs[0].f1[i + 1]) {
ex = outputs[0].f1[i + 1];
i1 = i + 2;
}
}
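  // i1 now holds the 1-based index of the colour whose centre-square mask
  // had the most set pixels (1 = red, 2 = orange, ..., 9 = black).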
  //  Make a mask over the whole image. The colour that is separated
// is the one that had the most pixels in the square
if (i1 == 1) {
// Red
colourSegmentation_kernel32<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 2) {
// Orange
colourSegmentation_kernel31<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 3) {
// Yellow
colourSegmentation_kernel30<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 4) {
// Green
colourSegmentation_kernel29<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 5) {
// Cyan
colourSegmentation_kernel28<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 6) {
// Blue
colourSegmentation_kernel27<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 7) {
// Purple
colourSegmentation_kernel26<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else if (i1 == 8) {
// White
colourSegmentation_kernel25<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
} else {
// Black
colourSegmentation_kernel24<<<dim3(1423U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(*gpu_HSVImage, *gpu_colourSegmentationMask);
}
cudaMemcpy(&colourSegmentationMask[0], gpu_colourSegmentationMask, 728160UL,
cudaMemcpyDeviceToHost);
cudaFree(*gpu_colourBalancedImage);
cudaFree(*gpu_Xin);
cudaFree(*gpu_HSVImage);
cudaFree(*gpu_HSVImage_size);
cudaFree(*gpu_centerColourMask);
cudaFree(*gpu_HSVImage_data);
cudaFree(*b_gpu_HSVImage_size);
cudaFree(*c_gpu_HSVImage_size);
cudaFree(*d_gpu_HSVImage_size);
cudaFree(*e_gpu_HSVImage_size);
cudaFree(*f_gpu_HSVImage_size);
cudaFree(*g_gpu_HSVImage_size);
cudaFree(*h_gpu_HSVImage_size);
cudaFree(*i_gpu_HSVImage_size);
cudaFree(*gpu_outputs);
cudaFree(*gpu_colourSegmentationMask);
}
//
// File trailer for colourSegmentation.cu
//
// [EOF]
//
|
3c9d4c773afbbf255308136e48e24a4a3f3a5ff0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "commonblas_z.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
//==============================================================================
__global__
void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
__shared__ double xnorm;
magmaDoubleComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#ifdef REAL
double alpha = *dx0;
double alphai = MAGMA_Z_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 )
#else
magmaDoubleComplex alpha = *dx0;
double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_Z_ZERO;
*dA = *dx0;
}
else {
#ifdef REAL
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = (beta - alpha) / beta;
                //*dx0 = 1.; // cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
                //*dx0 = MAGMA_Z_MAKE( 1., 0.); // cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = MAGMA_Z_MAKE(beta, 0.);
}
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
if (j < it) {
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_Z_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
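// Concretely, for the complex path the kernel above computes
//   beta = -sign(Re(alpha)) * dxnorm[0],
//   tau  = (beta - alpha) / beta,
//   v    = [1, dx / (alpha - beta)],
// with the norm supplied externally through dxnorm.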
extern "C" void
magma_zlarfgx_gpu_q(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dA, magma_int_t iter,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_zlarfgx_gpu_kernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgtx_gpu_q(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dA, magma_int_t iter,
magmaDoubleComplex_ptr V, magma_int_t ldv,
magmaDoubleComplex_ptr T, magma_int_t ldt,
magmaDoubleComplex_ptr dwork,
magma_queue_t queue )
{
/* Generate the elementary reflector H(iter) */
magma_zlarfgx_gpu_q(n, dx0, dx, dtau, dxnorm, dA, iter, queue);
if (iter == 0) {
magmaDoubleComplex tt = MAGMA_Z_ONE;
magmablas_zlacpy_q( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue );
magma_zsetmatrix_q( 1, 1, &tt, 1, dx0, 1, queue );
}
else {
/* Compute the iter-th column of T */
hipLaunchKernelGGL(( magma_zgemv_kernel3)
, dim3(iter), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
n, V, ldv, dx0, dwork, dtau );
hipLaunchKernelGGL(( magma_ztrmv_kernel2)
, dim3(iter), dim3(iter), 0, queue->cuda_stream() ,
T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
| 3c9d4c773afbbf255308136e48e24a4a3f3a5ff0.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "commonblas_z.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
//==============================================================================
__global__
void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
__shared__ double xnorm;
magmaDoubleComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#ifdef REAL
double alpha = *dx0;
double alphai = MAGMA_Z_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 )
#else
magmaDoubleComplex alpha = *dx0;
double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_Z_ZERO;
*dA = *dx0;
}
else {
#ifdef REAL
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = (beta - alpha) / beta;
                //*dx0 = 1.; // cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
                //*dx0 = MAGMA_Z_MAKE( 1., 0.); // cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = MAGMA_Z_MAKE(beta, 0.);
}
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
if (j < it) {
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_Z_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgx_gpu_q(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dA, magma_int_t iter,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
magma_zlarfgx_gpu_kernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
( n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgtx_gpu_q(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dA, magma_int_t iter,
magmaDoubleComplex_ptr V, magma_int_t ldv,
magmaDoubleComplex_ptr T, magma_int_t ldt,
magmaDoubleComplex_ptr dwork,
magma_queue_t queue )
{
/* Generate the elementary reflector H(iter) */
magma_zlarfgx_gpu_q(n, dx0, dx, dtau, dxnorm, dA, iter, queue);
if (iter == 0) {
magmaDoubleComplex tt = MAGMA_Z_ONE;
magmablas_zlacpy_q( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue );
magma_zsetmatrix_q( 1, 1, &tt, 1, dx0, 1, queue );
}
else {
/* Compute the iter-th column of T */
magma_zgemv_kernel3
<<< iter, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( n, V, ldv, dx0, dwork, dtau );
magma_ztrmv_kernel2
<<< iter, iter, 0, queue->cuda_stream() >>>
( T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
|
85b9bcbcdfa1c0090a1edf17d1c8668aa849e744.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_gradient_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
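  // (x_low, y_low) .. (x_high, y_high) are the four integer neighbours of
  // (x, y); w1..w4 are their bilinear interpolation weights and sum to 1.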
return;
}
template <typename T>
__global__ void RoIAlignRotatedBackward(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool continuous_coordinate) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
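    // Each ROI is stored as 6 values:
    // [batch_index, center_x, center_y, width, height, angle_in_degrees].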
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = c10::hip::compat::max(roi_width, (T)1.);
roi_height = c10::hip::compat::max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
gpu_atomic_add(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
gpu_atomic_add(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
gpu_atomic_add(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
} // namespace
template <>
C10_EXPORT bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->numel(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.numel() > 0) { // Handle possibly empty gradient if there were no rois
hipLaunchKernelGGL(( RoIAlignRotatedBackward<float>)
, dim3(CAFFE_GET_BLOCKS(dY.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
dY.numel(),
dY.data<float>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
dX->mutable_data<float>(),
R.data<float>(),
aligned_);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(
RoIAlignRotatedGradient,
RoIAlignRotatedGradientOp<float, CUDAContext>);
} // namespace caffe2
| 85b9bcbcdfa1c0090a1edf17d1c8668aa849e744.cu | #ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_gradient_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignRotatedBackward(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool continuous_coordinate) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = c10::cuda::compat::max(roi_width, (T)1.);
roi_height = c10::cuda::compat::max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
gpu_atomic_add(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
gpu_atomic_add(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
gpu_atomic_add(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
} // namespace
template <>
C10_EXPORT bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->numel(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.numel() > 0) { // Handle possibly empty gradient if there were no rois
RoIAlignRotatedBackward<float>
<<<CAFFE_GET_BLOCKS(dY.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
dY.numel(),
dY.data<float>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
dX->mutable_data<float>(),
R.data<float>(),
aligned_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(
RoIAlignRotatedGradient,
RoIAlignRotatedGradientOp<float, CUDAContext>);
} // namespace caffe2
|
bec195cc18e6a0894f4db8bfcbed8617efc91c4e.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaTextureFunctions.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaTextureFunctions.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
/*
* File Name: Cuda Texture Functions
*
* Author: Phillip Ward
* Creation Date: Monday, January 18 2010, 10:00
* Last Modified: Wednesday, February 23 2010, 16:35
*
* File Description:
*
*/
#include "EclipseCompat.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
void copy3DHostToArray(float *_src, hipArray *_dst, hipExtent copy_extent, hipPos src_offset, int3 imageDim)
{
hipMemcpy3DParms copyParams = {0};
float *h_source = _src + src_offset.x + src_offset.y*imageDim.x + src_offset.z*imageDim.x*imageDim.y;
copyParams.srcPtr = make_hipPitchedPtr((void*)h_source, imageDim.x*sizeof(float), imageDim.x, imageDim.y);
copyParams.dstArray = _dst;
copyParams.kind = hipMemcpyHostToDevice;
copyParams.extent = copy_extent;
CUDA_SAFE_CALL(hipMemcpy3D(©Params));
CUT_CHECK_ERROR("Host -> Array Memcpy failed\n");
}
void copy3DDeviceToArray(float *_src, hipArray *_dst, hipExtent copy_extent, hipPos src_offset, int3 imageDim)
{
hipMemcpy3DParms copyParams = {0};
float *d_source = _src + src_offset.x + src_offset.y*imageDim.x + src_offset.z*imageDim.x*imageDim.y;
copyParams.srcPtr = make_hipPitchedPtr((void*)d_source, imageDim.x*sizeof(float), imageDim.x, imageDim.y);
copyParams.dstArray = _dst;
copyParams.kind = hipMemcpyDeviceToDevice;
copyParams.extent = copy_extent;
CUDA_SAFE_CALL(hipMemcpy3D(©Params));
CUT_CHECK_ERROR("Device -> Array Memcpy failed\n");
}
void copy3DMemToArray(hipPitchedPtr _src, hipArray *_dst, int3 imageDim)
{
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = _src;
copyParams.dstArray = _dst;
copyParams.kind = hipMemcpyDeviceToDevice;
copyParams.extent = make_hipExtent(imageDim.x, imageDim.y, imageDim.z);
CUDA_SAFE_CALL(hipMemcpy3D(©Params));
CUT_CHECK_ERROR("Mem -> Array Memcpy failed\n");
}
| bec195cc18e6a0894f4db8bfcbed8617efc91c4e.cu | /*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaTextureFunctions.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaTextureFunctions.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
/*
* File Name: Cuda Texture Functions
*
* Author: Phillip Ward
* Creation Date: Monday, January 18 2010, 10:00
* Last Modified: Wednesday, February 23 2010, 16:35
*
* File Description:
*
*/
#include "EclipseCompat.h"
#include <stdio.h>
#include <cuda.h>
#include <cutil.h>
void copy3DHostToArray(float *_src, cudaArray *_dst, cudaExtent copy_extent, cudaPos src_offset, int3 imageDim)
{
cudaMemcpy3DParms copyParams = {0};
float *h_source = _src + src_offset.x + src_offset.y*imageDim.x + src_offset.z*imageDim.x*imageDim.y;
copyParams.srcPtr = make_cudaPitchedPtr((void*)h_source, imageDim.x*sizeof(float), imageDim.x, imageDim.y);
copyParams.dstArray = _dst;
copyParams.kind = cudaMemcpyHostToDevice;
copyParams.extent = copy_extent;
CUDA_SAFE_CALL(cudaMemcpy3D(©Params));
CUT_CHECK_ERROR("Host -> Array Memcpy failed\n");
}
void copy3DDeviceToArray(float *_src, cudaArray *_dst, cudaExtent copy_extent, cudaPos src_offset, int3 imageDim)
{
cudaMemcpy3DParms copyParams = {0};
float *d_source = _src + src_offset.x + src_offset.y*imageDim.x + src_offset.z*imageDim.x*imageDim.y;
copyParams.srcPtr = make_cudaPitchedPtr((void*)d_source, imageDim.x*sizeof(float), imageDim.x, imageDim.y);
copyParams.dstArray = _dst;
copyParams.kind = cudaMemcpyDeviceToDevice;
copyParams.extent = copy_extent;
CUDA_SAFE_CALL(cudaMemcpy3D(©Params));
CUT_CHECK_ERROR("Device -> Array Memcpy failed\n");
}
void copy3DMemToArray(cudaPitchedPtr _src, cudaArray *_dst, int3 imageDim)
{
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = _src;
copyParams.dstArray = _dst;
copyParams.kind = cudaMemcpyDeviceToDevice;
copyParams.extent = make_cudaExtent(imageDim.x, imageDim.y, imageDim.z);
CUDA_SAFE_CALL(cudaMemcpy3D(©Params));
CUT_CHECK_ERROR("Mem -> Array Memcpy failed\n");
}
|
57818a56a9378acf11a3f4a4d59b618e1683d7e8.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
Copyright (c) 2017-2018 Origin Quantum Computing Co., Ltd.. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Author:Xue Cheng
Date:2017-12-13
Description: Definition of Cuda function of gates
************************************************************************/
#include <vector>
#include <algorithm>
#include <time.h>
#include <cuda_device_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include "GPUGates.hpp"
using namespace std;
namespace gpu {
__global__ void
unitarysingle(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE Block,
STATE_T matrix_real00,
STATE_T matrix_real01,
STATE_T matrix_real10,
STATE_T matrix_real11,
STATE_T matrix_imag00,
STATE_T matrix_imag01,
STATE_T matrix_imag10,
STATE_T matrix_imag11)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
if (corIdx < Dim)
{
STATE_T X1 = psireal[realIdx];
STATE_T X2 = psireal[corIdx];
STATE_T Y1 = psiimag[realIdx];
STATE_T Y2 = psiimag[corIdx];
psireal[realIdx] = matrix_real00 * X1 - matrix_imag00 * Y1 + matrix_real01 * X2 - matrix_imag01 * Y2;
psireal[corIdx] = matrix_real10 * X1 - matrix_imag10 * Y1 + matrix_real11 * X2 - matrix_imag11 * Y2;
psiimag[realIdx] = matrix_real00 * Y1 + matrix_imag00 * X1 + matrix_real01 * Y2 + matrix_imag01 * X2;
psiimag[corIdx] = matrix_real10 * Y1 + matrix_imag10 * X1 + matrix_real11 * Y2 + matrix_imag11 * X2;
}
}
__global__ void controlunitarysingle(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE target_qubit,
QSIZE controller_mask,
STATE_T matrix_real00,
STATE_T matrix_real01,
STATE_T matrix_real10,
STATE_T matrix_real11,
STATE_T matrix_imag00,
STATE_T matrix_imag01,
STATE_T matrix_imag10,
STATE_T matrix_imag11
)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (
idx < Dim &&
((idx & controller_mask) == controller_mask) &&
((idx & target_qubit) == target_qubit)
)
{
QSIZE corIdx = idx; //1
QSIZE realIdx = corIdx - target_qubit; //0
STATE_T X1 = psireal[realIdx];
STATE_T X2 = psireal[corIdx];
STATE_T Y1 = psiimag[realIdx];
STATE_T Y2 = psiimag[corIdx];
psireal[realIdx] = matrix_real00 * X1 - matrix_imag00 * Y1 + matrix_real01 * X2 - matrix_imag01 * Y2;
psireal[corIdx] = matrix_real10 * X1 - matrix_imag10 * Y1 + matrix_real11 * X2 - matrix_imag11 * Y2;
psiimag[realIdx] = matrix_real00 * Y1 + matrix_imag00 * X1 + matrix_real01 * Y2 + matrix_imag01 * X2;
psiimag[corIdx] = matrix_real10 * Y1 + matrix_imag10 * X1 + matrix_real11 * Y2 + matrix_imag11 * X2;
}
}
__global__ void
unitarydouble(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE Block1,
QSIZE Block2,
STATE_T real0000,
STATE_T real0001,
STATE_T real0010,
STATE_T real0011,
STATE_T real0100,
STATE_T real0101,
STATE_T real0110,
STATE_T real0111,
STATE_T real1000,
STATE_T real1001,
STATE_T real1010,
STATE_T real1011,
STATE_T real1100,
STATE_T real1101,
STATE_T real1110,
STATE_T real1111,
STATE_T imag0000,
STATE_T imag0001,
STATE_T imag0010,
STATE_T imag0011,
STATE_T imag0100,
STATE_T imag0101,
STATE_T imag0110,
STATE_T imag0111,
STATE_T imag1000,
STATE_T imag1001,
STATE_T imag1010,
STATE_T imag1011,
STATE_T imag1100,
STATE_T imag1101,
STATE_T imag1110,
STATE_T imag1111)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x;
QSIZE Idx00, Idx01, Idx10, Idx11;
if (Block1 > Block2)
{
Idx10 = (idx / (Block1 / 2)) * 2 * Block1 + Block1 + (idx % (Block1 / 2) / Block2) * 2 * Block2 + idx % Block2;
}
else
{
Idx10 = (idx / (Block2 / 2)) * 2 * Block2 + (idx % (Block2 / 2) / Block1) * 2 * Block1 + Block1 + idx % Block1;
}
Idx00 = Idx10 - Block1;
Idx01 = Idx00 + Block2;
Idx11 = Idx10 + Block2;
if (Idx11 < Dim)
{
STATE_T X00 = psireal[Idx00];
STATE_T X01 = psireal[Idx01];
STATE_T X10 = psireal[Idx10];
STATE_T X11 = psireal[Idx11];
STATE_T Y00 = psiimag[Idx00];
STATE_T Y01 = psiimag[Idx01];
STATE_T Y10 = psiimag[Idx10];
STATE_T Y11 = psiimag[Idx11];
psireal[Idx00] = real0000 * X00 - imag0000 * Y00
+ real0001 * X01 - imag0001 * Y01
+ real0010 * X10 - imag0010 * Y10
+ real0011 * X11 - imag0011 * Y11;
psiimag[Idx00] = imag0000 * X00 + real0000 * Y00
+ imag0001 * X01 + real0001 * Y01
+ imag0010 * X10 + real0010 * Y10
+ imag0011 * X11 + real0011 * Y11;
psireal[Idx01] = real0100 * X00 - imag0100 * Y00
+ real0101 * X01 - imag0101 * Y01
+ real0110 * X10 - imag0110 * Y10
+ real0111 * X11 - imag0111 * Y11;
psiimag[Idx01] = imag0100 * X00 + real0100 * Y00
+ imag0101 * X01 + real0101 * Y01
+ imag0110 * X10 + real0110 * Y10
+ imag0111 * X11 + real0111 * Y11;
psireal[Idx10] = real1000 * X00 - imag1000 * Y00
+ real1001 * X01 - imag1001 * Y01
+ real1010 * X10 - imag1010 * Y10
+ real1011 * X11 - imag1011 * Y11;
psiimag[Idx10] = imag1000 * X00 + real1000 * Y00
+ imag1001 * X01 + real1001 * Y01
+ imag1010 * X10 + real1010 * Y10
+ imag1011 * X11 + real1011 * Y11;
psireal[Idx11] = real1100 * X00 - imag1100 * Y00
+ real1101 * X01 - imag1101 * Y01
+ real1110 * X10 - imag1110 * Y10
+ real1111 * X11 - imag1111 * Y11;
psiimag[Idx11] = imag1100 * X00 + real1100 * Y00
+ imag1101 * X01 + real1101 * Y01
+ imag1110 * X10 + real1110 * Y10
+ imag1111 * X11 + real1111 * Y11;
}
}
__global__ void controlunitarydouble(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE controller_mask,
QSIZE control_qubit,
QSIZE target_qubit,
STATE_T real0000,
STATE_T real0001,
STATE_T real0010,
STATE_T real0011,
STATE_T real0100,
STATE_T real0101,
STATE_T real0110,
STATE_T real0111,
STATE_T real1000,
STATE_T real1001,
STATE_T real1010,
STATE_T real1011,
STATE_T real1100,
STATE_T real1101,
STATE_T real1110,
STATE_T real1111,
STATE_T imag0000,
STATE_T imag0001,
STATE_T imag0010,
STATE_T imag0011,
STATE_T imag0100,
STATE_T imag0101,
STATE_T imag0110,
STATE_T imag0111,
STATE_T imag1000,
STATE_T imag1001,
STATE_T imag1010,
STATE_T imag1011,
STATE_T imag1100,
STATE_T imag1101,
STATE_T imag1110,
STATE_T imag1111)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (
idx < Dim &&
((idx & controller_mask) == controller_mask) &&
((idx & control_qubit) == control_qubit) &&
((idx & target_qubit) == target_qubit)
)
{
QSIZE Idx00 = idx - control_qubit - target_qubit;
QSIZE Idx01 = Idx00 - control_qubit;
QSIZE Idx10 = Idx00 - target_qubit;
QSIZE Idx11 = idx;
STATE_T X00 = psireal[Idx00];
STATE_T X01 = psireal[Idx01];
STATE_T X10 = psireal[Idx10];
STATE_T X11 = psireal[Idx11];
STATE_T Y00 = psiimag[Idx00];
STATE_T Y01 = psiimag[Idx01];
STATE_T Y10 = psiimag[Idx10];
STATE_T Y11 = psiimag[Idx11];
psireal[Idx00] = real0000 * X00 - imag0000 * Y00
+ real0001 * X01 - imag0001 * Y01
+ real0010 * X10 - imag0010 * Y10
+ real0011 * X11 - imag0011 * Y11;
psiimag[Idx00] = imag0000 * X00 + real0000 * Y00
+ imag0001 * X01 + real0001 * Y01
+ imag0010 * X10 + real0010 * Y10
+ imag0011 * X11 + real0011 * Y11;
psireal[Idx01] = real0100 * X00 - imag0100 * Y00
+ real0101 * X01 - imag0101 * Y01
+ real0110 * X10 - imag0110 * Y10
+ real0111 * X11 - imag0111 * Y11;
psiimag[Idx01] = imag0100 * X00 + real0100 * Y00
+ imag0101 * X01 + real0101 * Y01
+ imag0110 * X10 + real0110 * Y10
+ imag0111 * X11 + real0111 * Y11;
psireal[Idx10] = real1000 * X00 - imag1000 * Y00
+ real1001 * X01 - imag1001 * Y01
+ real1010 * X10 - imag1010 * Y10
+ real1011 * X11 - imag1011 * Y11;
psiimag[Idx10] = imag1000 * X00 + real1000 * Y00
+ imag1001 * X01 + real1001 * Y01
+ imag1010 * X10 + real1010 * Y10
+ imag1011 * X11 + real1011 * Y11;
psireal[Idx11] = real1100 * X00 - imag1100 * Y00
+ real1101 * X01 - imag1101 * Y01
+ real1110 * X10 - imag1110 * Y10
+ real1111 * X11 - imag1111 * Y11;
psiimag[Idx11] = imag1100 * X00 + real1100 * Y00
+ imag1101 * X01 + real1101 * Y01
+ imag1110 * X10 + real1110 * Y10
+ imag1111 * X11 + real1111 * Y11;
}
}
__global__ void initState(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (idx < Dim && idx != 0)
{
psireal[idx] = 0;
psiimag[idx] = 0;
}
if (0 == idx)
{
psireal[0] = 1;
psiimag[0] = 0;
}
}
__global__ void qubitprob(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim, QSIZE Block, STATE_T *pr)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x;
QSIZE bid = blockIdx.x, tid = threadIdx.x;
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
extern __shared__ STATE_T dprob[];
dprob[tid] = 0;
if (corIdx < Dim)
{
dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx];
__syncthreads();
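// Tree reduction in shared memory: on each pass, threads whose low 'mask' bits
// are all zero add in their partner dprob[tid + offset]; after log2(THREADDIM)
// passes dprob[0] holds the block-wide sum.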
int offset = 1, mask = 1;
while (offset < THREADDIM)
{
if ((tid & mask) == 0)
{
dprob[tid] += dprob[tid + offset];
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
if (tid == 0)
{
pr[bid] = dprob[0];
}
}
}
__global__ void probsumnew1(STATE_T * psireal, STATE_T * psiimag, STATE_T *probtemp, size_t num1, size_t m, size_t Dim, size_t * block)
{
size_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
size_t index1, index = 0, index2, k, s;
double temp = 0;
index1 = num1 + idx; // index1: the unweighted state index corresponding to idx
if (index1 < (1u << m))
{
for (size_t j = 0; j < m; j++)
{
index += block[j] * ((index1 >> j) % 2);
}// index: the state index corresponding to idx
for (size_t i = 0; i < Dim / (1u << m); i++)
{
index2 = i;
for (size_t j = 0; j < m; j++)
{
s = index2 / block[j];
k = index2 % block[j];
index2 = s * 2 * block[j] + k;
}
index2 += index;
temp += psireal[index2] * psireal[index2] + psiimag[index2] * psiimag[index2];
}
probtemp[idx] = temp;
}
}
__global__ void probsum(STATE_T * pr, STATE_T * prob)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (0 == idx)
{
STATE_T dprob = 0;
for (int i = 0; i < gridDim.x; i++)
{
dprob += pr[i];
}
*prob = dprob;
}
}//checked and can be optimized
__global__ void qubitcollapse0(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim, QSIZE Block, STATE_T coef)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
if (corIdx < Dim)
{
STATE_T X1 = psireal[realIdx];
STATE_T Y1 = psiimag[realIdx];
psireal[realIdx] = X1 * coef;
psireal[corIdx] = 0;
psiimag[realIdx] = Y1 * coef;
psiimag[corIdx] = 0;
}
}//checked
__global__ void qubitcollapse1(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim, QSIZE Block, STATE_T coef)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
if (corIdx < Dim)
{
STATE_T X2 = psireal[corIdx];
STATE_T Y2 = psiimag[corIdx];
psireal[realIdx] = 0;
psireal[corIdx] = X2 * coef;
psiimag[realIdx] = 0;
psiimag[corIdx] = Y2 * coef;
}
}//checked
/**************************************************************************************
psireal:
psiimag:
pro: save probability
block: qubit number
m: target qubit number
dec: target qubit state
****************************************************************************************/
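// Illustrative example (block values assumed for illustration only): the
// index-expansion loop inside multiprob below inserts a 0 bit at every
// target-qubit position listed in block[], and adding dec then selects the
// requested target outcome. With m = 2 and block = {1, 4} (target qubits 0
// and 2), idx = 0b101 expands to index = 0b10010, and dec = 0b101 picks the
// amplitude at realIdx = 0b10111.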
__global__ void multiprob(
STATE_T *psireal,
STATE_T *psiimag,
QSIZE Dim,
STATE_T *pro,
QSIZE *block,
QSIZE m,
QSIZE dec)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE bid = blockIdx.x, tid = threadIdx.x;
//QSIZE BlockNum = idx / Block;
//QSIZE BlockInt = idx% Block;
extern __shared__ STATE_T dprob[];
dprob[tid] = 0;
QSIZE i, j, k;
if (idx < Dim / (1 << m))
{
QSIZE index = idx;
for (i = 0; i < m; i++)
{
j = index / block[i];
k = index % block[i];
index = j * 2 * block[i] + k;
} //index: the state with all target qubits equal to 0
QSIZE realIdx = index + dec; // the state whose probability is accumulated
dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx];
__syncthreads();// tree-structured summation
int offset = 1, mask = 1;
while (offset < THREADDIM)
{
if ((tid & mask) == 0)
{
dprob[tid] += dprob[tid + offset];
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
// Record the result only in thread 0 (i.e. threadIdx.x == 0);
// summing the per-block dprob[0] values gives the final probability.
if (tid == 0)
{
pro[bid] = dprob[0]; // summing pro afterwards yields the final probability
}
}
}
__global__ void pmeasure_many_target(
STATE_T* psireal,
STATE_T* psiimag,
STATE_T* result,
QSIZE qnum_mask,
QSIZE result_size,
QSIZE Dim)
{
QSIZE bid = blockIdx.x;
QSIZE tid = threadIdx.x;
QSIZE idx = blockDim.x*bid + tid;
// Parallelize over the results; result_size results are computed in parallel.
// Suited to the case where the targets outnumber THREADDIM, e.g. a pmeasure over 10 qubits.
result[idx] = 0;
if (idx < result_size)
{
for (QSIZE i = 0; i < Dim / result_size; ++i)
{
QSIZE realIdx = 0;
QSIZE copy_i = i; // i is shifted repeatedly below, so keep a copy
QSIZE copy_idx = idx; // likewise
// Compute realIdx below.
// Example:
// qnum_mask : 00100100
// copy_i = abcdef
// copy_idx = xy
//
// realIdx should be abxcdyef
// The repeatedly right-shifted mask decides where each output bit comes from.
// flag tracks how many bit positions remain: start from Dim (e.g. 100000000) and keep right-shifting; reaching 1 means we are done.
// set_digit marks the bit currently being written; it is shifted left once per step, and realIdx = set_digit * (chosen bit) + realIdx.
// If the mask bit is 0 the bit comes from copy_i: take its lowest bit via copy_i & 1, then shift copy_i right by one.
// If the mask bit is 1 the bit comes from copy_idx, handled the same way.
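// Concrete example (illustrative values, not from the original source):
// with Dim = 0b100000000, qnum_mask = 0b00100100, copy_i = 0b011011 (abcdef)
// and copy_idx = 0b10 (xy), the loop interleaves the bits to give
// realIdx = 0b01110011 (= abxcdyef).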
QSIZE set_digit = 1;
QSIZE qnum_mask_copy = qnum_mask;
int loops = 0;
for (QSIZE flag = Dim;
flag != 1;
flag >>= 1)
{
loops++;
if ((qnum_mask_copy & 1) == 0)
{
realIdx += (set_digit *(copy_i & 1));
copy_i >>= 1;
}
else
{
realIdx += (set_digit *(copy_idx & 1));
copy_idx >>= 1;
}
set_digit <<= 1;
qnum_mask_copy >>= 1;
}
result[idx] += (
psireal[realIdx] * psireal[realIdx] +
psiimag[realIdx] * psiimag[realIdx]
);
}
}
}
__global__ void pmeasure_one_target(
STATE_T* psireal,
STATE_T* psiimag,
STATE_T* result,
QSIZE qnum_mask,
QSIZE result_idx,
size_t result_dim,
QSIZE Dim)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE bid = blockIdx.x, tid = threadIdx.x;
extern __shared__ STATE_T dprob[];
dprob[tid] = 0;
if (idx < (Dim>>result_dim))
{
QSIZE copy_idx = idx;
QSIZE copy_result_idx = result_idx;
// Compute realIdx below.
// Example:
// qnum_mask : 00100100
// idx = abcdef
// result_idx = xy
//
// realIdx should be abxcdyef
// The repeatedly right-shifted mask decides where each output bit comes from.
// flag tracks how many bit positions remain: start from Dim (e.g. 100000000) and keep right-shifting; reaching 1 means we are done.
// set_digit marks the bit currently being written; it is shifted left once per step, and realIdx = set_digit * (chosen bit) + realIdx.
// If the mask bit is 0 the bit comes from copy_idx: take its lowest bit via copy_idx & 1, then shift copy_idx right by one.
// If the mask bit is 1 the bit comes from copy_result_idx, handled the same way.
QSIZE realIdx = 0;
QSIZE set_digit = 1;
QSIZE qnum_mask_copy = qnum_mask;
int loops = 0;
for (QSIZE flag = Dim;
flag != 1;
flag >>= 1)
{
loops++;
if ((qnum_mask_copy & 1) == 0)
{
realIdx += (set_digit *(copy_idx & 1));
copy_idx >>= 1;
}
else
{
realIdx += (set_digit *(copy_result_idx & 1));
copy_result_idx >>= 1;
}
set_digit <<= 1;
qnum_mask_copy >>= 1;
}
dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx];
__syncthreads();
int offset = 1, mask = 1;
while (offset < THREADDIM)
{
if ((tid & mask) == 0)
{
dprob[tid] += dprob[tid + offset];
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
if (tid == 0)
{
result[bid] = dprob[0];
}
}
}
double randGenerator()
{
int ia = 16807, im = 2147483647, iq = 127773, ir = 2836; /*define constant numbers in the 16807 generator.*/
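// Minimal-standard Lehmer generator: seed' = (ia * seed) mod im, evaluated via
// Schrage's decomposition im = ia*iq + ir (2147483647 = 16807*127773 + 2836) so
// the intermediate products never overflow a signed 32-bit int.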
time_t rawtime;
struct tm * timeinfo;
time(&rawtime);
timeinfo = localtime(&rawtime);
static int irandseed = timeinfo->tm_year + 70 *
(timeinfo->tm_mon + 1 + 12 *
(timeinfo->tm_mday + 31 *
(timeinfo->tm_hour + 23 *
(timeinfo->tm_min + 59 * timeinfo->tm_sec))));
static int irandnewseed;
if (ia*(irandseed%iq) - ir * (irandseed / iq) >= 0)
irandnewseed = ia * (irandseed%iq) - ir * (irandseed / iq);
else
irandnewseed = ia * (irandseed%iq) - ir * (irandseed / iq) + im;
irandseed = irandnewseed;
return (double)irandnewseed / im;
}
} // namespace gpu
| 57818a56a9378acf11a3f4a4d59b618e1683d7e8.cu | /******************************************************************************
Copyright (c) 2017-2018 Origin Quantum Computing Co., Ltd.. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Author:Xue Cheng
Date:2017-12-13
Description: Definition of Cuda function of gates
************************************************************************/
#include <vector>
#include <algorithm>
#include <time.h>
#include <cuda_device_runtime_api.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include "GPUGates.hpp"
using namespace std;
namespace gpu {
__global__ void
unitarysingle(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE Block,
STATE_T matrix_real00,
STATE_T matrix_real01,
STATE_T matrix_real10,
STATE_T matrix_real11,
STATE_T matrix_imag00,
STATE_T matrix_imag01,
STATE_T matrix_imag10,
STATE_T matrix_imag11)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
if (corIdx < Dim)
{
STATE_T X1 = psireal[realIdx];
STATE_T X2 = psireal[corIdx];
STATE_T Y1 = psiimag[realIdx];
STATE_T Y2 = psiimag[corIdx];
psireal[realIdx] = matrix_real00 * X1 - matrix_imag00 * Y1 + matrix_real01 * X2 - matrix_imag01 * Y2;
psireal[corIdx] = matrix_real10 * X1 - matrix_imag10 * Y1 + matrix_real11 * X2 - matrix_imag11 * Y2;
psiimag[realIdx] = matrix_real00 * Y1 + matrix_imag00 * X1 + matrix_real01 * Y2 + matrix_imag01 * X2;
psiimag[corIdx] = matrix_real10 * Y1 + matrix_imag10 * X1 + matrix_real11 * Y2 + matrix_imag11 * X2;
}
}
__global__ void controlunitarysingle(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE target_qubit,
QSIZE controller_mask,
STATE_T matrix_real00,
STATE_T matrix_real01,
STATE_T matrix_real10,
STATE_T matrix_real11,
STATE_T matrix_imag00,
STATE_T matrix_imag01,
STATE_T matrix_imag10,
STATE_T matrix_imag11
)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (
idx < Dim &&
((idx & controller_mask) == controller_mask) &&
((idx & target_qubit) == target_qubit)
)
{
QSIZE corIdx = idx; //1
QSIZE realIdx = corIdx - target_qubit; //0
STATE_T X1 = psireal[realIdx];
STATE_T X2 = psireal[corIdx];
STATE_T Y1 = psiimag[realIdx];
STATE_T Y2 = psiimag[corIdx];
psireal[realIdx] = matrix_real00 * X1 - matrix_imag00 * Y1 + matrix_real01 * X2 - matrix_imag01 * Y2;
psireal[corIdx] = matrix_real10 * X1 - matrix_imag10 * Y1 + matrix_real11 * X2 - matrix_imag11 * Y2;
psiimag[realIdx] = matrix_real00 * Y1 + matrix_imag00 * X1 + matrix_real01 * Y2 + matrix_imag01 * X2;
psiimag[corIdx] = matrix_real10 * Y1 + matrix_imag10 * X1 + matrix_real11 * Y2 + matrix_imag11 * X2;
}
}
__global__ void
unitarydouble(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE Block1,
QSIZE Block2,
STATE_T real0000,
STATE_T real0001,
STATE_T real0010,
STATE_T real0011,
STATE_T real0100,
STATE_T real0101,
STATE_T real0110,
STATE_T real0111,
STATE_T real1000,
STATE_T real1001,
STATE_T real1010,
STATE_T real1011,
STATE_T real1100,
STATE_T real1101,
STATE_T real1110,
STATE_T real1111,
STATE_T imag0000,
STATE_T imag0001,
STATE_T imag0010,
STATE_T imag0011,
STATE_T imag0100,
STATE_T imag0101,
STATE_T imag0110,
STATE_T imag0111,
STATE_T imag1000,
STATE_T imag1001,
STATE_T imag1010,
STATE_T imag1011,
STATE_T imag1100,
STATE_T imag1101,
STATE_T imag1110,
STATE_T imag1111)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x;
QSIZE Idx00, Idx01, Idx10, Idx11;
if (Block1 > Block2)
{
Idx10 = (idx / (Block1 / 2)) * 2 * Block1 + Block1 + (idx % (Block1 / 2) / Block2) * 2 * Block2 + idx % Block2;
}
else
{
Idx10 = (idx / (Block2 / 2)) * 2 * Block2 + (idx % (Block2 / 2) / Block1) * 2 * Block1 + Block1 + idx % Block1;
}
Idx00 = Idx10 - Block1;
Idx01 = Idx00 + Block2;
Idx11 = Idx10 + Block2;
if (Idx11 < Dim)
{
STATE_T X00 = psireal[Idx00];
STATE_T X01 = psireal[Idx01];
STATE_T X10 = psireal[Idx10];
STATE_T X11 = psireal[Idx11];
STATE_T Y00 = psiimag[Idx00];
STATE_T Y01 = psiimag[Idx01];
STATE_T Y10 = psiimag[Idx10];
STATE_T Y11 = psiimag[Idx11];
psireal[Idx00] = real0000 * X00 - imag0000 * Y00
+ real0001 * X01 - imag0001 * Y01
+ real0010 * X10 - imag0010 * Y10
+ real0011 * X11 - imag0011 * Y11;
psiimag[Idx00] = imag0000 * X00 + real0000 * Y00
+ imag0001 * X01 + real0001 * Y01
+ imag0010 * X10 + real0010 * Y10
+ imag0011 * X11 + real0011 * Y11;
psireal[Idx01] = real0100 * X00 - imag0100 * Y00
+ real0101 * X01 - imag0101 * Y01
+ real0110 * X10 - imag0110 * Y10
+ real0111 * X11 - imag0111 * Y11;
psiimag[Idx01] = imag0100 * X00 + real0100 * Y00
+ imag0101 * X01 + real0101 * Y01
+ imag0110 * X10 + real0110 * Y10
+ imag0111 * X11 + real0111 * Y11;
psireal[Idx10] = real1000 * X00 - imag1000 * Y00
+ real1001 * X01 - imag1001 * Y01
+ real1010 * X10 - imag1010 * Y10
+ real1011 * X11 - imag1011 * Y11;
psiimag[Idx10] = imag1000 * X00 + real1000 * Y00
+ imag1001 * X01 + real1001 * Y01
+ imag1010 * X10 + real1010 * Y10
+ imag1011 * X11 + real1011 * Y11;
psireal[Idx11] = real1100 * X00 - imag1100 * Y00
+ real1101 * X01 - imag1101 * Y01
+ real1110 * X10 - imag1110 * Y10
+ real1111 * X11 - imag1111 * Y11;
psiimag[Idx11] = imag1100 * X00 + real1100 * Y00
+ imag1101 * X01 + real1101 * Y01
+ imag1110 * X10 + real1110 * Y10
+ imag1111 * X11 + real1111 * Y11;
}
}
__global__ void controlunitarydouble(
STATE_T * psireal,
STATE_T * psiimag,
QSIZE Dim,
QSIZE controller_mask,
QSIZE control_qubit,
QSIZE target_qubit,
STATE_T real0000,
STATE_T real0001,
STATE_T real0010,
STATE_T real0011,
STATE_T real0100,
STATE_T real0101,
STATE_T real0110,
STATE_T real0111,
STATE_T real1000,
STATE_T real1001,
STATE_T real1010,
STATE_T real1011,
STATE_T real1100,
STATE_T real1101,
STATE_T real1110,
STATE_T real1111,
STATE_T imag0000,
STATE_T imag0001,
STATE_T imag0010,
STATE_T imag0011,
STATE_T imag0100,
STATE_T imag0101,
STATE_T imag0110,
STATE_T imag0111,
STATE_T imag1000,
STATE_T imag1001,
STATE_T imag1010,
STATE_T imag1011,
STATE_T imag1100,
STATE_T imag1101,
STATE_T imag1110,
STATE_T imag1111)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (
idx < Dim &&
((idx & controller_mask) == controller_mask) &&
((idx & control_qubit) == control_qubit) &&
((idx & target_qubit) == target_qubit)
)
{
QSIZE Idx00 = idx - control_qubit - target_qubit;
QSIZE Idx01 = Idx00 - control_qubit;
QSIZE Idx10 = Idx00 - target_qubit;
QSIZE Idx11 = idx;
STATE_T X00 = psireal[Idx00];
STATE_T X01 = psireal[Idx01];
STATE_T X10 = psireal[Idx10];
STATE_T X11 = psireal[Idx11];
STATE_T Y00 = psiimag[Idx00];
STATE_T Y01 = psiimag[Idx01];
STATE_T Y10 = psiimag[Idx10];
STATE_T Y11 = psiimag[Idx11];
psireal[Idx00] = real0000 * X00 - imag0000 * Y00
+ real0001 * X01 - imag0001 * Y01
+ real0010 * X10 - imag0010 * Y10
+ real0011 * X11 - imag0011 * Y11;
psiimag[Idx00] = imag0000 * X00 + real0000 * Y00
+ imag0001 * X01 + real0001 * Y01
+ imag0010 * X10 + real0010 * Y10
+ imag0011 * X11 + real0011 * Y11;
psireal[Idx01] = real0100 * X00 - imag0100 * Y00
+ real0101 * X01 - imag0101 * Y01
+ real0110 * X10 - imag0110 * Y10
+ real0111 * X11 - imag0111 * Y11;
psiimag[Idx01] = imag0100 * X00 + real0100 * Y00
+ imag0101 * X01 + real0101 * Y01
+ imag0110 * X10 + real0110 * Y10
+ imag0111 * X11 + real0111 * Y11;
psireal[Idx10] = real1000 * X00 - imag1000 * Y00
+ real1001 * X01 - imag1001 * Y01
+ real1010 * X10 - imag1010 * Y10
+ real1011 * X11 - imag1011 * Y11;
psiimag[Idx10] = imag1000 * X00 + real1000 * Y00
+ imag1001 * X01 + real1001 * Y01
+ imag1010 * X10 + real1010 * Y10
+ imag1011 * X11 + real1011 * Y11;
psireal[Idx11] = real1100 * X00 - imag1100 * Y00
+ real1101 * X01 - imag1101 * Y01
+ real1110 * X10 - imag1110 * Y10
+ real1111 * X11 - imag1111 * Y11;
psiimag[Idx11] = imag1100 * X00 + real1100 * Y00
+ imag1101 * X01 + real1101 * Y01
+ imag1110 * X10 + real1110 * Y10
+ imag1111 * X11 + real1111 * Y11;
}
}
__global__ void initState(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (idx < Dim && idx != 0)
{
psireal[idx] = 0;
psiimag[idx] = 0;
}
if (0 == idx)
{
psireal[0] = 1;
psiimag[0] = 0;
}
}
__global__ void qubitprob(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim, QSIZE Block, STATE_T *pr)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x;
QSIZE bid = blockIdx.x, tid = threadIdx.x;
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
extern __shared__ STATE_T dprob[];
dprob[tid] = 0;
if (corIdx < Dim)
{
dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx];
__syncthreads();
int offset = 1, mask = 1;
while (offset < THREADDIM)
{
if ((tid & mask) == 0)
{
dprob[tid] += dprob[tid + offset];
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
if (tid == 0)
{
pr[bid] = dprob[0];
}
}
}
__global__ void probsumnew1(STATE_T * psireal, STATE_T * psiimag, STATE_T *probtemp, size_t num1, size_t m, size_t Dim, size_t * block)
{
size_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
size_t index1, index = 0, index2, k, s;
double temp = 0;
index1 = num1 + idx; // index1: the unweighted state index corresponding to idx
if (index1 < (1u << m))
{
for (size_t j = 0; j < m; j++)
{
index += block[j] * ((index1 >> j) % 2);
}// index: the state index corresponding to idx
for (size_t i = 0; i < Dim / (1u << m); i++)
{
index2 = i;
for (size_t j = 0; j < m; j++)
{
s = index2 / block[j];
k = index2 % block[j];
index2 = s * 2 * block[j] + k;
}
index2 += index;
temp += psireal[index2] * psireal[index2] + psiimag[index2] * psiimag[index2];
}
probtemp[idx] = temp;
}
}
__global__ void probsum(STATE_T * pr, STATE_T * prob)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
if (0 == idx)
{
STATE_T dprob = 0;
for (int i = 0; i < gridDim.x; i++)
{
dprob += pr[i];
}
*prob = dprob;
}
}//checked and can be optimized
__global__ void qubitcollapse0(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim, QSIZE Block, STATE_T coef)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
if (corIdx < Dim)
{
STATE_T X1 = psireal[realIdx];
STATE_T Y1 = psiimag[realIdx];
psireal[realIdx] = X1 * coef;
psireal[corIdx] = 0;
psiimag[realIdx] = Y1 * coef;
psiimag[corIdx] = 0;
}
}//checked
__global__ void qubitcollapse1(STATE_T * psireal, STATE_T * psiimag, QSIZE Dim, QSIZE Block, STATE_T coef)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE BlockNum = idx / Block;
QSIZE BlockInt = idx % Block;
QSIZE realIdx = BlockNum * 2 * Block + BlockInt;
QSIZE corIdx = realIdx + Block;
if (corIdx < Dim)
{
STATE_T X2 = psireal[corIdx];
STATE_T Y2 = psiimag[corIdx];
psireal[realIdx] = 0;
psireal[corIdx] = X2 * coef;
psiimag[realIdx] = 0;
psiimag[corIdx] = Y2 * coef;
}
}//checked
/**************************************************************************************
psireal:
psiimag:
pro: save probability
block: qubit number
m: target qubit number
dec: target qubit state
****************************************************************************************/
__global__ void multiprob(
STATE_T *psireal,
STATE_T *psiimag,
QSIZE Dim,
STATE_T *pro,
QSIZE *block,
QSIZE m,
QSIZE dec)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE bid = blockIdx.x, tid = threadIdx.x;
//QSIZE BlockNum = idx / Block;
//QSIZE BlockInt = idx% Block;
extern __shared__ STATE_T dprob[];
dprob[tid] = 0;
QSIZE i, j, k;
if (idx < Dim / (1 << m))
{
QSIZE index = idx;
for (i = 0; i < m; i++)
{
j = index / block[i];
k = index % block[i];
index = j * 2 * block[i] + k;
} //index: the state with all target qubits equal to 0
QSIZE realIdx = index + dec; // the state whose probability is accumulated
dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx];
__syncthreads();// tree-structured summation
int offset = 1, mask = 1;
while (offset < THREADDIM)
{
if ((tid & mask) == 0)
{
dprob[tid] += dprob[tid + offset];
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
// Record the result only in thread 0 (i.e. threadIdx.x == 0);
// summing the per-block dprob[0] values gives the final probability.
if (tid == 0)
{
pro[bid] = dprob[0]; // summing pro afterwards yields the final probability
}
}
}
__global__ void pmeasure_many_target(
STATE_T* psireal,
STATE_T* psiimag,
STATE_T* result,
QSIZE qnum_mask,
QSIZE result_size,
QSIZE Dim)
{
QSIZE bid = blockIdx.x;
QSIZE tid = threadIdx.x;
QSIZE idx = blockDim.x*bid + tid;
// Parallelize over the results; result_size results are computed in parallel.
// Suited to the case where the targets outnumber THREADDIM, e.g. a pmeasure over 10 qubits.
result[idx] = 0;
if (idx < result_size)
{
for (QSIZE i = 0; i < Dim / result_size; ++i)
{
QSIZE realIdx = 0;
QSIZE copy_i = i; // i is shifted repeatedly below, so keep a copy
QSIZE copy_idx = idx; // likewise
// Compute realIdx below.
// Example:
// qnum_mask : 00100100
// copy_i = abcdef
// copy_idx = xy
//
// realIdx should be abxcdyef
// The repeatedly right-shifted mask decides where each output bit comes from.
// flag tracks how many bit positions remain: start from Dim (e.g. 100000000) and keep right-shifting; reaching 1 means we are done.
// set_digit marks the bit currently being written; it is shifted left once per step, and realIdx = set_digit * (chosen bit) + realIdx.
// If the mask bit is 0 the bit comes from copy_i: take its lowest bit via copy_i & 1, then shift copy_i right by one.
// If the mask bit is 1 the bit comes from copy_idx, handled the same way.
QSIZE set_digit = 1;
QSIZE qnum_mask_copy = qnum_mask;
int loops = 0;
for (QSIZE flag = Dim;
flag != 1;
flag >>= 1)
{
loops++;
if ((qnum_mask_copy & 1) == 0)
{
realIdx += (set_digit *(copy_i & 1));
copy_i >>= 1;
}
else
{
realIdx += (set_digit *(copy_idx & 1));
copy_idx >>= 1;
}
set_digit <<= 1;
qnum_mask_copy >>= 1;
}
result[idx] += (
psireal[realIdx] * psireal[realIdx] +
psiimag[realIdx] * psiimag[realIdx]
);
}
}
}
__global__ void pmeasure_one_target(
STATE_T* psireal,
STATE_T* psiimag,
STATE_T* result,
QSIZE qnum_mask,
QSIZE result_idx,
size_t result_dim,
QSIZE Dim)
{
QSIZE idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number
QSIZE bid = blockIdx.x, tid = threadIdx.x;
extern __shared__ STATE_T dprob[];
dprob[tid] = 0;
if (idx < (Dim>>result_dim))
{
QSIZE copy_idx = idx;
QSIZE copy_result_idx = result_idx;
// Compute realIdx below.
// Example:
// qnum_mask : 00100100
// idx = abcdef
// result_idx = xy
//
// realIdx should be abxcdyef
// The repeatedly right-shifted mask decides where each output bit comes from.
// flag tracks how many bit positions remain: start from Dim (e.g. 100000000) and keep right-shifting; reaching 1 means we are done.
// set_digit marks the bit currently being written; it is shifted left once per step, and realIdx = set_digit * (chosen bit) + realIdx.
// If the mask bit is 0 the bit comes from copy_idx: take its lowest bit via copy_idx & 1, then shift copy_idx right by one.
// If the mask bit is 1 the bit comes from copy_result_idx, handled the same way.
QSIZE realIdx = 0;
QSIZE set_digit = 1;
QSIZE qnum_mask_copy = qnum_mask;
int loops = 0;
for (QSIZE flag = Dim;
flag != 1;
flag >>= 1)
{
loops++;
if ((qnum_mask_copy & 1) == 0)
{
realIdx += (set_digit *(copy_idx & 1));
copy_idx >>= 1;
}
else
{
realIdx += (set_digit *(copy_result_idx & 1));
copy_result_idx >>= 1;
}
set_digit <<= 1;
qnum_mask_copy >>= 1;
}
dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx];
__syncthreads();
int offset = 1, mask = 1;
while (offset < THREADDIM)
{
if ((tid & mask) == 0)
{
dprob[tid] += dprob[tid + offset];
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
if (tid == 0)
{
result[bid] = dprob[0];
}
}
}
double randGenerator()
{
int ia = 16807, im = 2147483647, iq = 127773, ir = 2836; /*define constant numbers in the 16807 generator.*/
time_t rawtime;
struct tm * timeinfo;
time(&rawtime);
timeinfo = localtime(&rawtime);
static int irandseed = timeinfo->tm_year + 70 *
(timeinfo->tm_mon + 1 + 12 *
(timeinfo->tm_mday + 31 *
(timeinfo->tm_hour + 23 *
(timeinfo->tm_min + 59 * timeinfo->tm_sec))));
static int irandnewseed;
if (ia*(irandseed%iq) - ir * (irandseed / iq) >= 0)
irandnewseed = ia * (irandseed%iq) - ir * (irandseed / iq);
else
irandnewseed = ia * (irandseed%iq) - ir * (irandseed / iq) + im;
irandseed = irandnewseed;
return (double)irandnewseed / im;
}
} // namespace gpu
|
94fbb68a52518b61af7c1b1593511344161adf8f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <string>
#include <math_functions.h>
#define TILE_WIDTH 32
__global__ void matrixMulKernelTiled(float *A, float *B, float *C, int N){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = bx * TILE_WIDTH + tx;
int row = by * TILE_WIDTH + ty;
float Pvalue = 0.0;
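// (TILE_WIDTH + N - 1)/TILE_WIDTH is ceil(N/TILE_WIDTH): the last iteration covers
// a partial tile when N is not a multiple of TILE_WIDTH; out-of-range elements are
// loaded as 0 so they do not contribute to Pvalue.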
for(int m = 0; m < (TILE_WIDTH + N - 1)/TILE_WIDTH; ++m){
if ((m*TILE_WIDTH + tx) < N && row < N)
Mds[ty][tx] = A[row*N + m*TILE_WIDTH + tx];
else
Mds[ty][tx] = 0.0;
if ((m*TILE_WIDTH + ty) < N && col < N)
Nds[ty][tx] = B[(m*TILE_WIDTH + ty) * N + col];
else
Nds[ty][tx] = 0.0;
__syncthreads();
for(int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
if (row < N && col < N)
C[row*N+col] = Pvalue;
}
__global__ void matrixMultGPU (float *A, float *B, float *C, int N){
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float ans;
if(col < N && row < N){
ans = 0.0;
for(int k=0;k<N;k++)
ans += A[row*N+k] * B[k*N+col];
C[row*N+col] = ans;
}
}
void matrixMultCPU(float *A, float *B, float *C, int N){
float ans;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
ans=0.0;
for(int k=0;k<N;k++)
ans += A[i*N+k]*B[k*N+j];
C[i*N+j] = ans;
}
}
}
std::string testValues(float *A, float *B, int N){
for(int i = 0; i < N; ++i)
for(int j = 0; j < N; ++j)
if(A[(i*N)+j]!=B[(i*N)+j]){
return "Wrong";
}
return "Correct";
}
void printMatrix(float *A, int N){
for(int i=0;i<N*N;i++){
if(i%N == 0)
printf("\n");
printf("%f; ",A[i]);
}
printf("\n---------\n");
}
void serial(float *A, float *B, float *C, double &time, int N) {
/*******************************HOST********************************/
clock_t tic = clock();
matrixMultCPU(A,B,C, N);
clock_t toc = clock();
time = (double)(toc - tic) / CLOCKS_PER_SEC;
/*****************************END HOST******************************/
}
void checkError(hipError_t error, std::string type) {
if(error != hipSuccess){
printf("Error in %s\n", type.c_str());
exit(0);
}
}
void cuda(float *A, float *B, float *C, double &time, float size, int N) {
hipError_t error = hipSuccess;
float *d_A, *d_B, *d_C;
error = hipMalloc((void**)&d_A,size);
checkError(error, "hipMalloc for d_A (cuda)");
error = hipMalloc((void**)&d_B,size);
checkError(error, "hipMalloc for d_B (cuda)");
error = hipMalloc((void**)&d_C,size);
checkError(error, "hipMalloc for d_C (cuda)");
/*******************************GPU********************************/
clock_t tic = clock();
error = hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
checkError(error, "hipMemcpy for d_A (cuda)");
error = hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
checkError(error, "hipMemcpy for d_B (cuda)");
dim3 dimBlock(32,32,1);
dim3 dimGrid(ceil(N/float(dimBlock.x)),ceil(N/float(dimBlock.y)),1);
hipLaunchKernelGGL(( matrixMultGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A,d_B,d_C,N);
hipDeviceSynchronize();
error = hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
checkError(error, "hipMemcpy for C (cuda)");
clock_t toc = clock();
time = (double)(toc - tic) / CLOCKS_PER_SEC;
/*****************************GPU END******************************/
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
void cuda_tiled(float *A, float *B, float *C, double &time, float size, int N) {
hipError_t error = hipSuccess;
float *d_A, *d_B, *d_C;
error = hipMalloc((void**)&d_A,size);
checkError(error, "hipMalloc for d_A (cuda with tiling)");
error = hipMalloc((void**)&d_B,size);
checkError(error, "hipMalloc for d_B (cuda with tiling)");
error = hipMalloc((void**)&d_C,size);
checkError(error, "hipMalloc for d_C (cuda with tiling)");
/*******************************GPU TILED********************************/
clock_t tic = clock();
error = hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
checkError(error, "hipMemcpy for d_A (cuda with tiling)");
error = hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
checkError(error, "hipMemcpy for d_B (cuda with tiling)");
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
dim3 dimGrid(ceil(N/float(dimBlock.x)),ceil(N/float(dimBlock.y)),1);
hipLaunchKernelGGL(( matrixMulKernelTiled), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A,d_B,d_C,N);
hipDeviceSynchronize();
error = hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
checkError(error, "hipMemcpy for C (cuda with tiling)");
clock_t toc = clock();
time = (double)(toc - tic) / CLOCKS_PER_SEC;
/*****************************GPU TILED END******************************/
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main(int argc, char **argv){
float *A, *B, *C1, *C2, *C3;
double CPU, GPU, GPU_tiled, acc1, acc2;
CPU = GPU = GPU_tiled = acc1 = acc2 = 0.0;
// Meaning of positions: {serial, cuda, cuda+tiling}
bool op[] = {false, false, false};
if(argc < 2) {
printf("No size given\n");
return -1;
}
int N = atoi(argv[1]);
for (int i = 2; i < argc; i++) {
std::string s = argv[i];
if (s == "s")
op[0] = true;
else if (s == "c")
op[1] = true;
else if (s == "ct")
op[2] = true;
}
float size = N*N*sizeof(float);
A = (float*)malloc(size);
B = (float*)malloc(size);
C1 = (float*)malloc(size);
C2 = (float*)malloc(size);
C3 = (float*)malloc(size);
for(int i=0;i<N*N;i++){
A[i]=1;
B[i]=2;
}
if (op[0]) serial(A, B, C1, CPU, N);
if (op[1]) cuda(A, B, C2, GPU, size, N);
if (op[2]) cuda_tiled(A, B, C3, GPU_tiled, size, N);
if (op[0]) {
printf(" %f |", CPU);
}
else printf(" - |");
if (op[1]) {
if (op[0]) {
acc1 = CPU / GPU;
std::string r1 = testValues(C1, C2, N);
printf(" %f | %f | %s |", GPU, acc1, r1.c_str());
}
else printf(" %f | - | - |", GPU);
}
else printf(" - | - | - |");
if (op[2]) {
if (op[1]) {
acc2 = GPU / GPU_tiled;
std::string r1 = testValues(C2, C3, N);
printf(" %f | %f | %s |\n", GPU_tiled, acc2, r1.c_str());
}
else if (op[0]) {
acc1 = CPU / GPU_tiled;
std::string r1 = testValues(C1, C3, N);
printf(" %f | %f | %s |\n", GPU_tiled, acc1, r1.c_str());
}
else printf(" %f | - | - |\n", GPU_tiled);
}
else printf(" - | - | - |\n");
free(A);
free(B);
free(C1);
free(C2);
free(C3);
return 0;
}
| 94fbb68a52518b61af7c1b1593511344161adf8f.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <string>
#include <math_functions.h>
#define TILE_WIDTH 32
__global__ void matrixMulKernelTiled(float *A, float *B, float *C, int N){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = bx * TILE_WIDTH + tx;
int row = by * TILE_WIDTH + ty;
float Pvalue = 0.0;
for(int m = 0; m < (TILE_WIDTH + N - 1)/TILE_WIDTH; ++m){
if ((m*TILE_WIDTH + tx) < N && row < N)
Mds[ty][tx] = A[row*N + m*TILE_WIDTH + tx];
else
Mds[ty][tx] = 0.0;
if ((m*TILE_WIDTH + ty) < N && col < N)
Nds[ty][tx] = B[(m*TILE_WIDTH + ty) * N + col];
else
Nds[ty][tx] = 0.0;
__syncthreads();
for(int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
if (row < N && col < N)
C[row*N+col] = Pvalue;
}
__global__ void matrixMultGPU (float *A, float *B, float *C, int N){
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float ans;
if(col < N && row < N){
ans = 0.0;
for(int k=0;k<N;k++)
ans += A[row*N+k] * B[k*N+col];
C[row*N+col] = ans;
}
}
void matrixMultCPU(float *A, float *B, float *C, int N){
float ans;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
ans=0.0;
for(int k=0;k<N;k++)
ans += A[i*N+k]*B[k*N+j];
C[i*N+j] = ans;
}
}
}
std::string testValues(float *A, float *B, int N){
for(int i = 0; i < N; ++i)
for(int j = 0; j < N; ++j)
if(A[(i*N)+j]!=B[(i*N)+j]){
return "Wrong";
}
return "Correct";
}
void printMatrix(float *A, int N){
for(int i=0;i<N*N;i++){
if(i%N == 0)
printf("\n");
printf("%f; ",A[i]);
}
printf("\n---------\n");
}
void serial(float *A, float *B, float *C, double &time, int N) {
/*******************************HOST********************************/
clock_t tic = clock();
matrixMultCPU(A,B,C, N);
clock_t toc = clock();
time = (double)(toc - tic) / CLOCKS_PER_SEC;
/*****************************END HOST******************************/
}
void checkError(cudaError_t error, std::string type) {
if(error != cudaSuccess){
printf("Error in %s\n", type.c_str());
exit(0);
}
}
void cuda(float *A, float *B, float *C, double &time, float size, int N) {
cudaError_t error = cudaSuccess;
float *d_A, *d_B, *d_C;
error = cudaMalloc((void**)&d_A,size);
checkError(error, "cudaMalloc for d_A (cuda)");
error = cudaMalloc((void**)&d_B,size);
checkError(error, "cudaMalloc for d_B (cuda)");
error = cudaMalloc((void**)&d_C,size);
checkError(error, "cudaMalloc for d_C (cuda)");
/*******************************GPU********************************/
clock_t tic = clock();
error = cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
checkError(error, "cudaMemcpy for d_A (cuda)");
error = cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
checkError(error, "cudaMemcpy for d_B (cuda)");
dim3 dimBlock(32,32,1);
dim3 dimGrid(ceil(N/float(dimBlock.x)),ceil(N/float(dimBlock.y)),1);
matrixMultGPU<<<dimGrid,dimBlock>>>(d_A,d_B,d_C,N);
cudaDeviceSynchronize();
error = cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
checkError(error, "cudaMemcpy for C (cuda)");
clock_t toc = clock();
time = (double)(toc - tic) / CLOCKS_PER_SEC;
/*****************************GPU END******************************/
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
void cuda_tiled(float *A, float *B, float *C, double &time, float size, int N) {
cudaError_t error = cudaSuccess;
float *d_A, *d_B, *d_C;
error = cudaMalloc((void**)&d_A,size);
checkError(error, "cudaMalloc for d_A (cuda with tiling)");
error = cudaMalloc((void**)&d_B,size);
checkError(error, "cudaMalloc for d_B (cuda with tiling)");
error = cudaMalloc((void**)&d_C,size);
checkError(error, "cudaMalloc for d_C (cuda with tiling)");
/*******************************GPU TILED********************************/
clock_t tic = clock();
error = cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
checkError(error, "cudaMemcpy for d_A (cuda with tiling)");
error = cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
checkError(error, "cudaMemcpy for d_B (cuda with tiling)");
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
dim3 dimGrid(ceil(N/float(dimBlock.x)),ceil(N/float(dimBlock.y)),1);
matrixMulKernelTiled<<<dimGrid,dimBlock>>>(d_A,d_B,d_C,N);
cudaDeviceSynchronize();
error = cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
checkError(error, "cudaMemcpy for C (cuda with tiling)");
clock_t toc = clock();
time = (double)(toc - tic) / CLOCKS_PER_SEC;
/*****************************GPU TILED END******************************/
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main(int argc, char **argv){
float *A, *B, *C1, *C2, *C3;
double CPU, GPU, GPU_tiled, acc1, acc2;
CPU = GPU = GPU_tiled = acc1 = acc2 = 0.0;
// Meaning of positions: {serial, cuda, cuda+tiling}
bool op[] = {false, false, false};
if(argc < 2) {
printf("No size given\n");
return -1;
}
int N = atoi(argv[1]);
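	// Remaining arguments select which versions run: "s" = serial CPU, "c" = plain CUDA, "ct" = CUDA with tiling.
	// Example invocation (binary name is illustrative): ./matrixmul 1024 s c ct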
for (int i = 2; i < argc; i++) {
std::string s = argv[i];
if (s == "s")
op[0] = true;
else if (s == "c")
op[1] = true;
else if (s == "ct")
op[2] = true;
}
float size = N*N*sizeof(float);
A = (float*)malloc(size);
B = (float*)malloc(size);
C1 = (float*)malloc(size);
C2 = (float*)malloc(size);
C3 = (float*)malloc(size);
for(int i=0;i<N*N;i++){
A[i]=1;
B[i]=2;
}
if (op[0]) serial(A, B, C1, CPU, N);
if (op[1]) cuda(A, B, C2, GPU, size, N);
if (op[2]) cuda_tiled(A, B, C3, GPU_tiled, size, N);
if (op[0]) {
printf(" %f |", CPU);
}
else printf(" - |");
if (op[1]) {
if (op[0]) {
acc1 = CPU / GPU;
std::string r1 = testValues(C1, C2, N);
printf(" %f | %f | %s |", GPU, acc1, r1.c_str());
}
else printf(" %f | - | - |", GPU);
}
else printf(" - | - | - |");
if (op[2]) {
if (op[1]) {
acc2 = GPU / GPU_tiled;
std::string r1 = testValues(C2, C3, N);
printf(" %f | %f | %s |\n", GPU_tiled, acc2, r1.c_str());
}
else if (op[0]) {
acc1 = CPU / GPU_tiled;
std::string r1 = testValues(C1, C3, N);
printf(" %f | %f | %s |\n", GPU_tiled, acc1, r1.c_str());
}
else printf(" %f | - | - |\n", GPU_tiled);
}
else printf(" - | - | - |\n");
free(A);
free(B);
free(C1);
free(C2);
free(C3);
return 0;
}
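// Optional cross-check against cuBLAS (a sketch only; not used by this program; assumes <cublas_v2.h> and -lcublas are available).
// For the row-major N x N device buffers d_A, d_B, d_C allocated in cuda() above, C = A * B can be obtained from
// column-major cuBLAS by swapping the operands: cuBLAS then computes B^T * A^T, which is (A*B)^T, i.e. A*B when read back row-major.
//
//   cublasHandle_t handle;
//   cublasCreate(&handle);
//   const float alpha = 1.0f, beta = 0.0f;
//   cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
//               &alpha, d_B, N, d_A, N, &beta, d_C, N);
//   cublasDestroy(handle);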
|
068e09f099dee5fe9661b30ad798c598dfe0f9be.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by daisy on 15.11.17.
//
#pragma once
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "device_launch_parameters.h"
#include "MatrixRW.h"
#include "MatrixMulHost.h"
#define TILE_WIDTH 4
const int CrowN=16;//16 agents
const int CcolN=4*16;//4 neurons,16 is populationSize. could calculate whole population's output together
const int ArowN=CrowN;
const int AcolN=16*3+1*3;//16 preys,1 predator
const int BrowN=AcolN;
const int BcolN=CcolN;
const int gridY=(CrowN+TILE_WIDTH-1)/TILE_WIDTH;
const int gridX=(CcolN+TILE_WIDTH-1)/TILE_WIDTH;
int main(void){
float* A;
float* B;
float* C;
A= ( float * ) malloc ( ArowN * AcolN * sizeof ( float ));
B = ( float *) malloc ( BrowN * BcolN * sizeof ( float ));
C = ( float *) malloc ( CrowN * CcolN * sizeof ( float ));
ReadMatrix (A, width );
    ReadMatrix (B, width );
    MatrixMulHost(A, B, C, ArowN, AcolN, BrowN, BcolN, CrowN, CcolN , TILE_WIDTH );
    printf ("\n");
    PrintMatrix (A, width );
    printf ("\n");
    PrintMatrix (B, width );
    printf ("\n");
    PrintMatrix (C, width );
    free (A);
    free (B);
free (C);
} | 068e09f099dee5fe9661b30ad798c598dfe0f9be.cu | //
// Created by daisy on 15.11.17.
//
#pragma once
#include "cuda.h"
#include "cuda_runtime.h"
#include "stdio.h"
#include "device_launch_parameters.h"
#include "MatrixRW.h"
#include "MatrixMulHost.h"
#define TILE_WIDTH 4
const int CrowN=16;//16 agents
const int CcolN=4*16;//4 neurons,16 is populationSize. could calculate whole population's output together
const int ArowN=CrowN;
const int AcolN=16*3+1*3;//16 preys,1 predator
const int BrowN=AcolN;
const int BcolN=CcolN;
const int gridY=(CrowN+TILE_WIDTH-1)/TILE_WIDTH;
const int gridX=(CcolN+TILE_WIDTH-1)/TILE_WIDTH;
int main(void){
float* A;
float* B;
float* C;
A= ( float * ) malloc ( ArowN * AcolN * sizeof ( float ));
B = ( float *) malloc ( BrowN * BcolN * sizeof ( float ));
C = ( float *) malloc ( CrowN * CcolN * sizeof ( float ));
ReadMatrix (A, width );
    ReadMatrix (B, width );
    MatrixMulHost(A, B, C, ArowN, AcolN, BrowN, BcolN, CrowN, CcolN , TILE_WIDTH );
    printf ("\n");
    PrintMatrix (A, width );
    printf ("\n");
    PrintMatrix (B, width );
    printf ("\n");
    PrintMatrix (C, width );
    free (A);
    free (B);
free (C);
} |
2468252e86ed4c41585f7be47393584874f9f195.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
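	// Sweep every (matrix size, block shape) combination defined above; each configuration is timed below.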
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int columns = 1;
int rows = XSIZE;
float *mat1 = NULL;
hipMalloc(&mat1, XSIZE*YSIZE);
float *matanswer = NULL;
hipMalloc(&matanswer, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel_1), dim3(gridBlock),dim3(threadBlock), 0, 0, columns,rows,mat1,matanswer);
hipDeviceSynchronize();
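	// Warm-up launches (untimed) so the measured loop below reflects steady-state kernel time.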
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_1), dim3(gridBlock),dim3(threadBlock), 0, 0, columns,rows,mat1,matanswer);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_1), dim3(gridBlock),dim3(threadBlock), 0, 0, columns,rows,mat1,matanswer);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2468252e86ed4c41585f7be47393584874f9f195.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
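	// Sweep every (matrix size, block shape) combination defined above; each configuration is timed below.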
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int columns = 1;
int rows = XSIZE;
float *mat1 = NULL;
cudaMalloc(&mat1, XSIZE*YSIZE);
float *matanswer = NULL;
cudaMalloc(&matanswer, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_1<<<gridBlock,threadBlock>>>(columns,rows,mat1,matanswer);
cudaDeviceSynchronize();
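	// Warm-up launches (untimed) so the measured loop below reflects steady-state kernel time.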
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_1<<<gridBlock,threadBlock>>>(columns,rows,mat1,matanswer);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_1<<<gridBlock,threadBlock>>>(columns,rows,mat1,matanswer);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e378f4f6c054fa680174b705ba1cc4650206d4a1.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| e378f4f6c054fa680174b705ba1cc4650206d4a1.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
da8ccced89e4e0979ad342fa850a086ef0c6ff76.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BLOCKSIZE 128
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**********************************/
/* adjacentDifferencesCPU FUNCTION */
/**********************************/
void adjacentDifferencesCPU(int *h_a, int *h_b, int N)
{
for (int k = 1; k < N; k++) {
int a_i = h_a[k];
int a_i_minus_one = h_a[k - 1];
h_b[k] = a_i - a_i_minus_one;
}
}
/**********************************/
/* adjacentDifferencesGPU FUNCTION */
/**********************************/
__global__ void adjacentDifferencesGPU(int *d_a, int *d_b, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if ((tid >= N) || (tid == 0)) return;
int a_i = d_a[tid];
int a_i_minus_one = d_a[tid - 1];
d_b[tid] = a_i - a_i_minus_one;
}
/****************************************/
/* adjacentDifferencesSharedGPU FUNCTION */
/****************************************/
__global__ void adjacentDifferencesSharedGPU(int *d_a, int *d_b, int N)
{
// --- Shorthand for threadIdx.x
int tidx = threadIdx.x;
int tid = tidx + blockDim.x * blockIdx.x;
// --- Allocate a __shared__ array, one element per thread
__shared__ int s_data[BLOCKSIZE];
// --- Each thread reads one element to s_data
s_data[tidx] = d_a[tid];
// --- Avoid race condition: ensure all loads complete before continuing
__syncthreads();
if (tidx > 0)
d_b[tid] = s_data[tidx] - s_data[tidx - 1];
else if (tid > 0)
{
// --- Handle thread block boundary
d_b[tid] = s_data[tidx] - d_a[tid - 1];
}
}
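/*************************************************/
/* adjacentDifferencesSharedWithHaloGPU FUNCTION */
/*************************************************/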
__global__ void adjacentDifferencesSharedWithHaloGPU(int *d_a, int *d_b, int N)
{
// --- Allocate a __shared__ array, one element per thread
__shared__ int s_data[BLOCKSIZE + 1];
// --- Shorthand for threadIdx.x
int gindexx = threadIdx.x + blockDim.x * blockIdx.x;
int lindexx = threadIdx.x + 1;
// --- Each thread reads one element to s_data
s_data[lindexx] = d_a[gindexx];
if (threadIdx.x == 0) {
s_data[0] = (((gindexx - 1) >= 0) && (gindexx <= N)) ? d_a[gindexx - 1] : 0;
}
// --- Avoid race condition: ensure all loads complete before continuing
__syncthreads();
if (gindexx > 0) d_b[gindexx] = s_data[lindexx] - s_data[lindexx - 1];
}
/************************************************/
/* adjacentDifferencesExternalSharedGPU FUNCTION */
/************************************************/
__global__ void adjacentDifferencesExternalSharedGPU(int *d_a, int *d_b, int N)
{
// --- Shorthand for threadIdx.x
int tidx = threadIdx.x;
int tid = tidx + blockDim.x * blockIdx.x;
// --- Allocate a __shared__ array, one element per thread
extern __shared__ int s_data[];
// --- Each thread reads one element to s_data
s_data[tidx] = d_a[tid];
// --- Avoid race condition: ensure all loads complete before continuing
__syncthreads();
if (tidx > 0)
d_b[tid] = s_data[tidx] - s_data[tidx - 1];
else if (tid > 0)
{
// --- Handle thread block boundary
d_b[tid] = s_data[tidx] - d_a[tid - 1];
}
}
/********/
/* MAIN */
/********/
int main() {
const int N = 256;
// --- Allocating host memory for data and results
int *h_a = (int *)malloc(N * sizeof(int));
int *h_b = (int *)malloc(N * sizeof(int));
int *h_b_device = (int *)malloc(N * sizeof(int));
// --- Allocating device memory for data and results
int *d_a, *d_b;
gpuErrchk(hipMalloc(&d_a, N * sizeof(int)));
gpuErrchk(hipMalloc(&d_b, N * sizeof(int)));
// --- Filling the input vectors on host memory
for (int k = 0; k < N; k++) {
h_a[k] = k;
}
// --- Moving data from host to device
gpuErrchk(hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice));
adjacentDifferencesCPU(h_a, h_b, N);
//adjacentDifferencesGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, N);
//adjacentDifferencesSharedGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, N);
adjacentDifferencesSharedWithHaloGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, N);
//adjacentDifferencesExternalSharedGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE, BLOCKSIZE * sizeof(int) >> >(d_a, d_b, N);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(h_b_device, d_b, N * sizeof(int), hipMemcpyDeviceToHost));
for (int k = 1; k < N; k++)
if (h_b_device[k] != h_b[k]) {
printf("Host and device results do not match for k = %d: h_c[%d] = %d; h_c_device[%d] = %d\n", k, k, h_b[k], k, h_b_device[k]);
return 0;
}
printf("No errors found.\n");
return 0;
}
| da8ccced89e4e0979ad342fa850a086ef0c6ff76.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKSIZE 128
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**********************************/
/* adjacentDifferencesCPU FUNCTION */
/**********************************/
void adjacentDifferencesCPU(int *h_a, int *h_b, int N)
{
for (int k = 1; k < N; k++) {
int a_i = h_a[k];
int a_i_minus_one = h_a[k - 1];
h_b[k] = a_i - a_i_minus_one;
}
}
/**********************************/
/* adjacentDifferencesGPU FUNCTION */
/**********************************/
__global__ void adjacentDifferencesGPU(int *d_a, int *d_b, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if ((tid >= N) || (tid == 0)) return;
int a_i = d_a[tid];
int a_i_minus_one = d_a[tid - 1];
d_b[tid] = a_i - a_i_minus_one;
}
/****************************************/
/* adjacentDifferencesSharedGPU FUNCTION */
/****************************************/
__global__ void adjacentDifferencesSharedGPU(int *d_a, int *d_b, int N)
{
// --- Shorthand for threadIdx.x
int tidx = threadIdx.x;
int tid = tidx + blockDim.x * blockIdx.x;
// --- Allocate a __shared__ array, one element per thread
__shared__ int s_data[BLOCKSIZE];
// --- Each thread reads one element to s_data
s_data[tidx] = d_a[tid];
// --- Avoid race condition: ensure all loads complete before continuing
__syncthreads();
if (tidx > 0)
d_b[tid] = s_data[tidx] - s_data[tidx - 1];
else if (tid > 0)
{
// --- Handle thread block boundary
d_b[tid] = s_data[tidx] - d_a[tid - 1];
}
}
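/*************************************************/
/* adjacentDifferencesSharedWithHaloGPU FUNCTION */
/*************************************************/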
__global__ void adjacentDifferencesSharedWithHaloGPU(int *d_a, int *d_b, int N)
{
// --- Allocate a __shared__ array, one element per thread
__shared__ int s_data[BLOCKSIZE + 1];
// --- Shorthand for threadIdx.x
int gindexx = threadIdx.x + blockDim.x * blockIdx.x;
int lindexx = threadIdx.x + 1;
// --- Each thread reads one element to s_data
s_data[lindexx] = d_a[gindexx];
if (threadIdx.x == 0) {
s_data[0] = (((gindexx - 1) >= 0) && (gindexx <= N)) ? d_a[gindexx - 1] : 0;
}
// --- Avoid race condition: ensure all loads complete before continuing
__syncthreads();
if (gindexx > 0) d_b[gindexx] = s_data[lindexx] - s_data[lindexx - 1];
}
/************************************************/
/* adjacentDifferencesExternalSharedGPU FUNCTION */
/************************************************/
__global__ void adjacentDifferencesExternalSharedGPU(int *d_a, int *d_b, int N)
{
// --- Shorthand for threadIdx.x
int tidx = threadIdx.x;
int tid = tidx + blockDim.x * blockIdx.x;
// --- Allocate a __shared__ array, one element per thread
extern __shared__ int s_data[];
// --- Each thread reads one element to s_data
s_data[tidx] = d_a[tid];
// --- Avoid race condition: ensure all loads complete before continuing
__syncthreads();
if (tidx > 0)
d_b[tid] = s_data[tidx] - s_data[tidx - 1];
else if (tid > 0)
{
// --- Handle thread block boundary
d_b[tid] = s_data[tidx] - d_a[tid - 1];
}
}
/********/
/* MAIN */
/********/
int main() {
const int N = 256;
// --- Allocating host memory for data and results
int *h_a = (int *)malloc(N * sizeof(int));
int *h_b = (int *)malloc(N * sizeof(int));
int *h_b_device = (int *)malloc(N * sizeof(int));
// --- Allocating device memory for data and results
int *d_a, *d_b;
gpuErrchk(cudaMalloc(&d_a, N * sizeof(int)));
gpuErrchk(cudaMalloc(&d_b, N * sizeof(int)));
// --- Filling the input vectors on host memory
for (int k = 0; k < N; k++) {
h_a[k] = k;
}
// --- Moving data from host to device
gpuErrchk(cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice));
adjacentDifferencesCPU(h_a, h_b, N);
//adjacentDifferencesGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, N);
//adjacentDifferencesSharedGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, N);
adjacentDifferencesSharedWithHaloGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, N);
//adjacentDifferencesExternalSharedGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE, BLOCKSIZE * sizeof(int) >> >(d_a, d_b, N);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(h_b_device, d_b, N * sizeof(int), cudaMemcpyDeviceToHost));
for (int k = 1; k < N; k++)
if (h_b_device[k] != h_b[k]) {
printf("Host and device results do not match for k = %d: h_c[%d] = %d; h_c_device[%d] = %d\n", k, k, h_b[k], k, h_b_device[k]);
return 0;
}
printf("No errors found.\n");
return 0;
}
|