hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---
e30f23bdb04371ca87c3242487b39e9b92283de2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include <opencv2/cudev/ptr2d/texture.hpp>
#include <limits.h>
namespace cv { namespace cuda { namespace device
{
namespace stereobm
{
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Stereo BM ////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
#define ROWSperTHREAD 21 // the number of rows a thread will process
#define BLOCK_W 128 // the thread block width (464)
#define N_DISPARITIES 8
#define STEREO_MIND 0 // The minimum d range to check
#define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing
__device__ __forceinline__ int SQ(int a)
{
return a * a;
}
template<int RADIUS>
__device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth)
{
unsigned int cache = 0;
unsigned int cache2 = 0;
if (X < cwidth - RADIUS)
{
for(int i = 1; i <= RADIUS; i++)
cache += col_ssd[i];
}
col_ssd_cache[0] = cache;
__syncthreads();
if (X < cwidth - RADIUS)
{
if (threadIdx.x < BLOCK_W - RADIUS)
cache2 = col_ssd_cache[RADIUS];
else
for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++)
cache2 += col_ssd[i];
}
return col_ssd[0] + cache + cache2;
}
template<int RADIUS>
__device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth, unsigned int* ssd)
{
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS), X, cwidth);
int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7])));
int bestIdx = 0;
for (int i = 0; i < N_DISPARITIES; i++)
{
if (mssd == ssd[i])
bestIdx = i;
}
return make_uint2(mssd, bestIdx);
}
template<int RADIUS>
__device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
unsigned char leftPixel2;
unsigned char rightPixel1[8];
unsigned char rightPixel2[8];
unsigned int diff1, diff2;
leftPixel1 = imageL[idx1];
leftPixel2 = imageL[idx2];
idx1 = idx1 - d;
idx2 = idx2 - d;
rightPixel1[7] = imageR[idx1 - 7];
rightPixel1[0] = imageR[idx1 - 0];
rightPixel1[1] = imageR[idx1 - 1];
rightPixel1[2] = imageR[idx1 - 2];
rightPixel1[3] = imageR[idx1 - 3];
rightPixel1[4] = imageR[idx1 - 4];
rightPixel1[5] = imageR[idx1 - 5];
rightPixel1[6] = imageR[idx1 - 6];
rightPixel2[7] = imageR[idx2 - 7];
rightPixel2[0] = imageR[idx2 - 0];
rightPixel2[1] = imageR[idx2 - 1];
rightPixel2[2] = imageR[idx2 - 2];
rightPixel2[3] = imageR[idx2 - 3];
rightPixel2[4] = imageR[idx2 - 4];
rightPixel2[5] = imageR[idx2 - 5];
rightPixel2[6] = imageR[idx2 - 6];
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
diff1 = leftPixel1 - rightPixel1[0];
diff2 = leftPixel2 - rightPixel2[0];
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[1];
diff2 = leftPixel2 - rightPixel2[1];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[2];
diff2 = leftPixel2 - rightPixel2[2];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[3];
diff2 = leftPixel2 - rightPixel2[3];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[4];
diff2 = leftPixel2 - rightPixel2[4];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[5];
diff2 = leftPixel2 - rightPixel2[5];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[6];
diff2 = leftPixel2 - rightPixel2[6];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[7];
diff2 = leftPixel2 - rightPixel2[7];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
}
template<int RADIUS>
__device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
int idx;
unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};
for(int i = 0; i < (2 * RADIUS + 1); i++)
{
idx = y_tex * im_pitch + x_tex;
leftPixel1 = imageL[idx];
idx = idx - d;
diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);
y_tex += 1;
}
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7];
}
template<int RADIUS>
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp,
int uniquenessRatio, unsigned int* cminSSDImage, size_t cminSSD_step, int cwidth, int cheight)
{
extern __shared__ unsigned int col_ssd_cache[];
uint line_ssds[2 + N_DISPARITIES]; // +2 - tail of previous batch for accurate uniquenessRatio check
uint* batch_ssds = line_ssds + 2;
uint line_ssd_tails[3*ROWSperTHREAD];
uchar uniqueness_approved[ROWSperTHREAD];
uchar local_disparity[ROWSperTHREAD];
volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0;
const int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS);
const int Y = (blockIdx.y * ROWSperTHREAD + RADIUS);
unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
unsigned char* disparImage = disp.data + X + Y * disp.step;
float thresh_scale;
int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS);
int y_tex;
int x_tex = X - RADIUS;
if (x_tex >= cwidth)
return;
for(int i = 0; i < ROWSperTHREAD; i++)
local_disparity[i] = 0;
for(int i = 0; i < 3*ROWSperTHREAD; i++)
{
line_ssd_tails[i] = UINT_MAX;
}
if (uniquenessRatio > 0)
{
batch_ssds[6] = UINT_MAX;
batch_ssds[7] = UINT_MAX;
thresh_scale = (1.0 + uniquenessRatio / 100.0f);
for(int i = 0; i < ROWSperTHREAD; i++)
{
uniqueness_approved[i] = 1;
}
}
for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
{
y_tex = Y - RADIUS;
InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd);
if (col_ssd_extra != nullptr)
if (x_tex + BLOCK_W < cwidth)
InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);
__syncthreads(); //before MinSSD function
if (Y < cheight - RADIUS)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*0 + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*0 + 1];
line_ssds[1] = line_ssd_tails[3*0 + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[0];
if(batch_opt.x < last_opt)
{
uniqueness_approved[0] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[0] < dtest-1 || local_disparity[0] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[0] = 0;
}
}
if(uniqueness_approved[0])
{
// the trial to decompose the code on 2 loops without ld vs dtest makes
// uniqueness check dramatically slow. at least on gf 1080
for (int ld = d-2; ld < d + N_DISPARITIES; ld++)
{
if ((ld < dtest-1 || ld > dtest+1) && (line_ssds[ld-d+2] <= thresh))
{
uniqueness_approved[0] = 0;
break;
}
}
}
line_ssd_tails[3*0 + 1] = batch_ssds[6];
line_ssd_tails[3*0 + 2] = batch_ssds[7];
}
line_ssd_tails[3*0 + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[0] = (unsigned char)(d + batch_opt.y);
}
}
}
for(int row = 1; row < end_row; row++)
{
int idx1 = y_tex * img_step + x_tex;
int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex;
__syncthreads();
StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd);
if (col_ssd_extra)
if (x_tex + BLOCK_W < cwidth)
StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);
y_tex += 1;
__syncthreads();
if (row < cheight - RADIUS - Y)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*row + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*row + 1];
line_ssds[1] = line_ssd_tails[3*row + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[row];
if(batch_opt.x < last_opt)
{
uniqueness_approved[row] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[row] < dtest-1 || local_disparity[row] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[row] = 0;
}
}
if(uniqueness_approved[row])
{
for (int ld = 0; ld < N_DISPARITIES + 2; ld++)
{
if (((d+ld-2 < dtest-1) || (d+ld-2 > dtest+1)) && (line_ssds[ld] <= thresh))
{
uniqueness_approved[row] = 0;
break;
}
}
}
line_ssd_tails[3*row + 1] = batch_ssds[6];
line_ssd_tails[3*row + 2] = batch_ssds[7];
}
line_ssd_tails[3*row + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[row] = (unsigned char)(d + batch_opt.y);
}
}
}
} // for row loop
__syncthreads(); // before initializing shared memory at the beginning of next loop
} // for d loop
for (int row = 0; row < end_row; row++)
{
minSSDImage[row * cminSSD_step] = line_ssd_tails[3*row + 0];
}
if (uniquenessRatio > 0)
{
for (int row = 0; row < end_row; row++)
{
// drop disparity for pixel where uniqueness requirement was not satisfied (zero value)
disparImage[disp.step * row] = local_disparity[row] * uniqueness_approved[row];
}
}
else
{
for (int row = 0; row < end_row; row++)
{
disparImage[disp.step * row] = local_disparity[row];
}
}
}
template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, hipStream_t & stream)
{
dim3 grid(1,1,1);
dim3 threads(BLOCK_W, 1, 1);
grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int);
hipLaunchKernelGGL(( stereoKernel<RADIUS>), dim3(grid), dim3(threads), smem_size, stream, left.data, right.data, left.step, disp, maxdisp, uniquenessRatio,
missd_buffer, minssd_step, cwidth, cheight);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
};
typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, hipStream_t & stream);
const static kernel_caller_t callers[] =
{
0,
kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>,
kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>,
kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>,
kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25>
//0,0,0, 0,0,0, 0,0,kernel_caller<9>
};
const int calles_num = sizeof(callers)/sizeof(callers[0]);
void stereoBM_CUDA(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp,
int winsz, int uniquenessRatio, const PtrStepSz<unsigned int>& minSSD_buf, hipStream_t& stream)
{
int winsz2 = winsz >> 1;
if (winsz2 == 0 || winsz2 >= calles_num)
CV_Error(cv::Error::StsBadArg, "Unsupported window size");
cudaSafeCall( hipMemset2DAsync(disp.data, disp.step, 0, disp.cols, disp.rows, stream) );
cudaSafeCall( hipMemset2DAsync(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows, stream) );
size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
callers[winsz2](left, right, disp, maxdisp, uniquenessRatio, minSSD_buf.data, minssd_step, left.cols, left.rows, stream);
}
__device__ inline int clamp(int x, int a, int b)
{
return ::max(a, ::min(b, x));
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Sobel Prefilter ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_xsobel(PtrStepSzb input, PtrStepSzb output, int prefilterCap)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < output.cols && y < output.rows)
{
int conv = input.ptr(::max(0,y-1))[::max(0,x-1)] * (-1) + input.ptr(::max(0, y-1))[::min(x+1, input.cols-1)] * (1) +
input.ptr(y )[::max(0,x-1)] * (-2) + input.ptr(y )[::min(x+1, input.cols-1)] * (2) +
input.ptr(::min(y+1, input.rows-1))[::max(0,x-1)] * (-1) + input.ptr(::min(y+1, input.rows-1))[::min(x+1,input.cols-1)] * (1);
conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255);
output.ptr(y)[x] = conv & 0xFF;
}
}
void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, hipStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
hipLaunchKernelGGL(( prefilter_kernel_xsobel), dim3(grid), dim3(threads), 0, stream, input, output, prefilterCap);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Norm Prefilter ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_norm(PtrStepSzb input, PtrStepSzb output, int prefilterCap, int scale_g, int scale_s, int winsize)
{
// prefilterCap in range 1..63, checked in StereoBMImpl::compute
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int cols = input.cols;
int rows = input.rows;
int WSZ2 = winsize / 2;
if(x < cols && y < rows)
{
int cov1 = input.ptr(::max(y-1, 0))[x] * 1 +
input.ptr(y)[::min(x+1, cols-1)] * 1 + input.ptr(y )[x] * 4 + input.ptr(y)[::min(x+1, cols-1)] * 1 +
input.ptr(::min(y+1, rows-1))[x] * 1;
int cov2 = 0;
for(int i = -WSZ2; i < WSZ2+1; i++)
for(int j = -WSZ2; j < WSZ2+1; j++)
cov2 += input.ptr(clamp(y+i, 0, rows-1))[clamp(x+j, 0, cols-1)];
int res = (cov1*scale_g - cov2*scale_s)>>10;
res = clamp(res, -prefilterCap, prefilterCap) + prefilterCap;
output.ptr(y)[x] = res;
}
}
void prefilter_norm(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, int winsize, hipStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
int scale_g = winsize*winsize/8, scale_s = (1024 + scale_g)/(scale_g*2);
scale_g *= scale_s;
hipLaunchKernelGGL(( prefilter_kernel_norm), dim3(grid), dim3(threads), 0, stream, input, output, prefilterCap, scale_g, scale_s, winsize);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Textureness filtering ////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__device__ __forceinline__ float sobel(cv::cudev::TexturePtr<uchar, float> texSrc, int x, int y)
{
float conv = texSrc(y - 1, x - 1) * (-1) + texSrc(y - 1, x + 1) * (1) +
texSrc(y, x - 1) * (-2) + texSrc(y, x + 1) * (2) +
texSrc(y + 1, x - 1) * (-1) + texSrc(y + 1, x + 1) * (1);
return fabs(conv);
}
__device__ float CalcSums(float *cols, float *cols_cache, int winsz)
{
float cache = 0;
float cache2 = 0;
int winsz2 = winsz/2;
for(int i = 1; i <= winsz2; i++)
cache += cols[i];
cols_cache[0] = cache;
__syncthreads();
if (threadIdx.x < blockDim.x - winsz2)
cache2 = cols_cache[winsz2];
else
for(int i = winsz2 + 1; i < winsz; i++)
cache2 += cols[i];
return cols[0] + cache + cache2;
}
#define RpT (2 * ROWSperTHREAD) // got experimentally
__global__ void textureness_kernel(cv::cudev::TexturePtr<uchar,float> texSrc, PtrStepSzb disp, int winsz, float threshold)
{
int winsz2 = winsz/2;
int n_dirty_pixels = (winsz2) * 2;
extern __shared__ float cols_cache[];
float *cols = cols_cache + blockDim.x + threadIdx.x;
float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int beg_row = blockIdx.y * RpT;
int end_row = ::min(beg_row + RpT, disp.rows);
if (x < disp.cols)
{
int y = beg_row;
float sum = 0;
float sum_extra = 0;
for(int i = y - winsz2; i <= y + winsz2; ++i)
{
sum += sobel(texSrc, x - winsz2, i);
if (cols_extra)
sum_extra += sobel(texSrc, x + blockDim.x - winsz2, i);
}
*cols = sum;
if (cols_extra)
*cols_extra = sum_extra;
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
for(int y = beg_row + 1; y < end_row; ++y)
{
sum = sum - sobel(texSrc, x - winsz2, y - winsz2 - 1) + sobel(texSrc, x - winsz2, y + winsz2);
*cols = sum;
if (cols_extra)
{
sum_extra = sum_extra - sobel(texSrc, x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(texSrc, x + blockDim.x - winsz2, y + winsz2);
*cols_extra = sum_extra;
}
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
}
}
}
void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, hipStream_t & stream)
{
avgTexturenessThreshold *= winsz * winsz;
cv::cudev::Texture<unsigned char, float> tex(input, false, hipFilterModeLinear, hipAddressModeWrap, hipReadModeNormalizedFloat);
dim3 threads(128, 1, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, RpT);
size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float);
hipLaunchKernelGGL(( textureness_kernel), dim3(grid), dim3(threads), smem_size, stream, tex, disp, winsz, avgTexturenessThreshold);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
} // namespace stereobm
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
| e30f23bdb04371ca87c3242487b39e9b92283de2.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include <opencv2/cudev/ptr2d/texture.hpp>
#include <limits.h>
namespace cv { namespace cuda { namespace device
{
namespace stereobm
{
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Stereo BM ////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
#define ROWSperTHREAD 21 // the number of rows a thread will process
#define BLOCK_W 128 // the thread block width (464)
#define N_DISPARITIES 8
#define STEREO_MIND 0 // The minimum d range to check
#define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing
__device__ __forceinline__ int SQ(int a)
{
return a * a;
}
template<int RADIUS>
__device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth)
{
unsigned int cache = 0;
unsigned int cache2 = 0;
if (X < cwidth - RADIUS)
{
for(int i = 1; i <= RADIUS; i++)
cache += col_ssd[i];
}
col_ssd_cache[0] = cache;
__syncthreads();
if (X < cwidth - RADIUS)
{
if (threadIdx.x < BLOCK_W - RADIUS)
cache2 = col_ssd_cache[RADIUS];
else
for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++)
cache2 += col_ssd[i];
}
return col_ssd[0] + cache + cache2;
}
template<int RADIUS>
__device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth, unsigned int* ssd)
{
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS), X, cwidth);
int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7])));
int bestIdx = 0;
for (int i = 0; i < N_DISPARITIES; i++)
{
if (mssd == ssd[i])
bestIdx = i;
}
return make_uint2(mssd, bestIdx);
}
template<int RADIUS>
__device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
unsigned char leftPixel2;
unsigned char rightPixel1[8];
unsigned char rightPixel2[8];
unsigned int diff1, diff2;
leftPixel1 = imageL[idx1];
leftPixel2 = imageL[idx2];
idx1 = idx1 - d;
idx2 = idx2 - d;
rightPixel1[7] = imageR[idx1 - 7];
rightPixel1[0] = imageR[idx1 - 0];
rightPixel1[1] = imageR[idx1 - 1];
rightPixel1[2] = imageR[idx1 - 2];
rightPixel1[3] = imageR[idx1 - 3];
rightPixel1[4] = imageR[idx1 - 4];
rightPixel1[5] = imageR[idx1 - 5];
rightPixel1[6] = imageR[idx1 - 6];
rightPixel2[7] = imageR[idx2 - 7];
rightPixel2[0] = imageR[idx2 - 0];
rightPixel2[1] = imageR[idx2 - 1];
rightPixel2[2] = imageR[idx2 - 2];
rightPixel2[3] = imageR[idx2 - 3];
rightPixel2[4] = imageR[idx2 - 4];
rightPixel2[5] = imageR[idx2 - 5];
rightPixel2[6] = imageR[idx2 - 6];
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
diff1 = leftPixel1 - rightPixel1[0];
diff2 = leftPixel2 - rightPixel2[0];
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[1];
diff2 = leftPixel2 - rightPixel2[1];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[2];
diff2 = leftPixel2 - rightPixel2[2];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[3];
diff2 = leftPixel2 - rightPixel2[3];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[4];
diff2 = leftPixel2 - rightPixel2[4];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[5];
diff2 = leftPixel2 - rightPixel2[5];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[6];
diff2 = leftPixel2 - rightPixel2[6];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[7];
diff2 = leftPixel2 - rightPixel2[7];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
}
template<int RADIUS>
__device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
int idx;
unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};
for(int i = 0; i < (2 * RADIUS + 1); i++)
{
idx = y_tex * im_pitch + x_tex;
leftPixel1 = imageL[idx];
idx = idx - d;
diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);
y_tex += 1;
}
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7];
}
template<int RADIUS>
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp,
int uniquenessRatio, unsigned int* cminSSDImage, size_t cminSSD_step, int cwidth, int cheight)
{
extern __shared__ unsigned int col_ssd_cache[];
uint line_ssds[2 + N_DISPARITIES]; // +2 - tail of previous batch for accurate uniquenessRatio check
uint* batch_ssds = line_ssds + 2;
uint line_ssd_tails[3*ROWSperTHREAD];
uchar uniqueness_approved[ROWSperTHREAD];
uchar local_disparity[ROWSperTHREAD];
volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0;
const int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS);
const int Y = (blockIdx.y * ROWSperTHREAD + RADIUS);
unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
unsigned char* disparImage = disp.data + X + Y * disp.step;
float thresh_scale;
int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS);
int y_tex;
int x_tex = X - RADIUS;
if (x_tex >= cwidth)
return;
for(int i = 0; i < ROWSperTHREAD; i++)
local_disparity[i] = 0;
for(int i = 0; i < 3*ROWSperTHREAD; i++)
{
line_ssd_tails[i] = UINT_MAX;
}
if (uniquenessRatio > 0)
{
batch_ssds[6] = UINT_MAX;
batch_ssds[7] = UINT_MAX;
thresh_scale = (1.0 + uniquenessRatio / 100.0f);
for(int i = 0; i < ROWSperTHREAD; i++)
{
uniqueness_approved[i] = 1;
}
}
for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
{
y_tex = Y - RADIUS;
InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd);
if (col_ssd_extra != nullptr)
if (x_tex + BLOCK_W < cwidth)
InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);
__syncthreads(); //before MinSSD function
if (Y < cheight - RADIUS)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*0 + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*0 + 1];
line_ssds[1] = line_ssd_tails[3*0 + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[0];
if(batch_opt.x < last_opt)
{
uniqueness_approved[0] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[0] < dtest-1 || local_disparity[0] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[0] = 0;
}
}
if(uniqueness_approved[0])
{
// the trial to decompose the code on 2 loops without ld vs dtest makes
// uniqueness check dramatically slow. at least on gf 1080
for (int ld = d-2; ld < d + N_DISPARITIES; ld++)
{
if ((ld < dtest-1 || ld > dtest+1) && (line_ssds[ld-d+2] <= thresh))
{
uniqueness_approved[0] = 0;
break;
}
}
}
line_ssd_tails[3*0 + 1] = batch_ssds[6];
line_ssd_tails[3*0 + 2] = batch_ssds[7];
}
line_ssd_tails[3*0 + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[0] = (unsigned char)(d + batch_opt.y);
}
}
}
for(int row = 1; row < end_row; row++)
{
int idx1 = y_tex * img_step + x_tex;
int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex;
__syncthreads();
StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd);
if (col_ssd_extra)
if (x_tex + BLOCK_W < cwidth)
StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);
y_tex += 1;
__syncthreads();
if (row < cheight - RADIUS - Y)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*row + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*row + 1];
line_ssds[1] = line_ssd_tails[3*row + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[row];
if(batch_opt.x < last_opt)
{
uniqueness_approved[row] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[row] < dtest-1 || local_disparity[row] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[row] = 0;
}
}
if(uniqueness_approved[row])
{
for (int ld = 0; ld < N_DISPARITIES + 2; ld++)
{
if (((d+ld-2 < dtest-1) || (d+ld-2 > dtest+1)) && (line_ssds[ld] <= thresh))
{
uniqueness_approved[row] = 0;
break;
}
}
}
line_ssd_tails[3*row + 1] = batch_ssds[6];
line_ssd_tails[3*row + 2] = batch_ssds[7];
}
line_ssd_tails[3*row + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[row] = (unsigned char)(d + batch_opt.y);
}
}
}
} // for row loop
__syncthreads(); // before initializing shared memory at the beginning of next loop
} // for d loop
for (int row = 0; row < end_row; row++)
{
minSSDImage[row * cminSSD_step] = line_ssd_tails[3*row + 0];
}
if (uniquenessRatio > 0)
{
for (int row = 0; row < end_row; row++)
{
// drop disparity for pixel where uniqueness requirement was not satisfied (zero value)
disparImage[disp.step * row] = local_disparity[row] * uniqueness_approved[row];
}
}
else
{
for (int row = 0; row < end_row; row++)
{
disparImage[disp.step * row] = local_disparity[row];
}
}
}
template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, cudaStream_t & stream)
{
dim3 grid(1,1,1);
dim3 threads(BLOCK_W, 1, 1);
grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int);
stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(left.data, right.data, left.step, disp, maxdisp, uniquenessRatio,
missd_buffer, minssd_step, cwidth, cheight);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
};
typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, cudaStream_t & stream);
const static kernel_caller_t callers[] =
{
0,
kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>,
kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>,
kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>,
kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25>
//0,0,0, 0,0,0, 0,0,kernel_caller<9>
};
const int calles_num = sizeof(callers)/sizeof(callers[0]);
void stereoBM_CUDA(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp,
int winsz, int uniquenessRatio, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t& stream)
{
int winsz2 = winsz >> 1;
if (winsz2 == 0 || winsz2 >= calles_num)
CV_Error(cv::Error::StsBadArg, "Unsupported window size");
cudaSafeCall( cudaMemset2DAsync(disp.data, disp.step, 0, disp.cols, disp.rows, stream) );
cudaSafeCall( cudaMemset2DAsync(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows, stream) );
size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
callers[winsz2](left, right, disp, maxdisp, uniquenessRatio, minSSD_buf.data, minssd_step, left.cols, left.rows, stream);
}
__device__ inline int clamp(int x, int a, int b)
{
return ::max(a, ::min(b, x));
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Sobel Prefilter ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_xsobel(PtrStepSzb input, PtrStepSzb output, int prefilterCap)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < output.cols && y < output.rows)
{
int conv = input.ptr(::max(0,y-1))[::max(0,x-1)] * (-1) + input.ptr(::max(0, y-1))[::min(x+1, input.cols-1)] * (1) +
input.ptr(y )[::max(0,x-1)] * (-2) + input.ptr(y )[::min(x+1, input.cols-1)] * (2) +
input.ptr(::min(y+1, input.rows-1))[::max(0,x-1)] * (-1) + input.ptr(::min(y+1, input.rows-1))[::min(x+1,input.cols-1)] * (1);
conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255);
output.ptr(y)[x] = conv & 0xFF;
}
}
void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, cudaStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
prefilter_kernel_xsobel<<<grid, threads, 0, stream>>>(input, output, prefilterCap);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Norm Prefilter ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_norm(PtrStepSzb input, PtrStepSzb output, int prefilterCap, int scale_g, int scale_s, int winsize)
{
// prefilterCap in range 1..63, checked in StereoBMImpl::compute
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int cols = input.cols;
int rows = input.rows;
int WSZ2 = winsize / 2;
if(x < cols && y < rows)
{
int cov1 = input.ptr(::max(y-1, 0))[x] * 1 +
input.ptr(y)[::min(x+1, cols-1)] * 1 + input.ptr(y )[x] * 4 + input.ptr(y)[::min(x+1, cols-1)] * 1 +
input.ptr(::min(y+1, rows-1))[x] * 1;
int cov2 = 0;
for(int i = -WSZ2; i < WSZ2+1; i++)
for(int j = -WSZ2; j < WSZ2+1; j++)
cov2 += input.ptr(clamp(y+i, 0, rows-1))[clamp(x+j, 0, cols-1)];
int res = (cov1*scale_g - cov2*scale_s)>>10;
res = clamp(res, -prefilterCap, prefilterCap) + prefilterCap;
output.ptr(y)[x] = res;
}
}
void prefilter_norm(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, int winsize, cudaStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
int scale_g = winsize*winsize/8, scale_s = (1024 + scale_g)/(scale_g*2);
scale_g *= scale_s;
prefilter_kernel_norm<<<grid, threads, 0, stream>>>(input, output, prefilterCap, scale_g, scale_s, winsize);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Textureness filtering ////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__device__ __forceinline__ float sobel(cv::cudev::TexturePtr<uchar, float> texSrc, int x, int y)
{
float conv = texSrc(y - 1, x - 1) * (-1) + texSrc(y - 1, x + 1) * (1) +
texSrc(y, x - 1) * (-2) + texSrc(y, x + 1) * (2) +
texSrc(y + 1, x - 1) * (-1) + texSrc(y + 1, x + 1) * (1);
return fabs(conv);
}
__device__ float CalcSums(float *cols, float *cols_cache, int winsz)
{
float cache = 0;
float cache2 = 0;
int winsz2 = winsz/2;
for(int i = 1; i <= winsz2; i++)
cache += cols[i];
cols_cache[0] = cache;
__syncthreads();
if (threadIdx.x < blockDim.x - winsz2)
cache2 = cols_cache[winsz2];
else
for(int i = winsz2 + 1; i < winsz; i++)
cache2 += cols[i];
return cols[0] + cache + cache2;
}
#define RpT (2 * ROWSperTHREAD) // got experimentally
__global__ void textureness_kernel(cv::cudev::TexturePtr<uchar,float> texSrc, PtrStepSzb disp, int winsz, float threshold)
{
int winsz2 = winsz/2;
int n_dirty_pixels = (winsz2) * 2;
extern __shared__ float cols_cache[];
float *cols = cols_cache + blockDim.x + threadIdx.x;
float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int beg_row = blockIdx.y * RpT;
int end_row = ::min(beg_row + RpT, disp.rows);
if (x < disp.cols)
{
int y = beg_row;
float sum = 0;
float sum_extra = 0;
for(int i = y - winsz2; i <= y + winsz2; ++i)
{
sum += sobel(texSrc, x - winsz2, i);
if (cols_extra)
sum_extra += sobel(texSrc, x + blockDim.x - winsz2, i);
}
*cols = sum;
if (cols_extra)
*cols_extra = sum_extra;
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
for(int y = beg_row + 1; y < end_row; ++y)
{
sum = sum - sobel(texSrc, x - winsz2, y - winsz2 - 1) + sobel(texSrc, x - winsz2, y + winsz2);
*cols = sum;
if (cols_extra)
{
sum_extra = sum_extra - sobel(texSrc, x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(texSrc, x + blockDim.x - winsz2, y + winsz2);
*cols_extra = sum_extra;
}
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
}
}
}
void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, cudaStream_t & stream)
{
avgTexturenessThreshold *= winsz * winsz;
cv::cudev::Texture<unsigned char, float> tex(input, false, cudaFilterModeLinear, cudaAddressModeWrap, cudaReadModeNormalizedFloat);
dim3 threads(128, 1, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, RpT);
size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float);
textureness_kernel<<<grid, threads, smem_size, stream>>>(tex, disp, winsz, avgTexturenessThreshold);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
} // namespace stereobm
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
|
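The row above is representative of how these pairs differ: the device code is identical, and the changes are confined to the added `hip/hip_runtime.h` header, the runtime API prefix (`cuda*` to `hip*`), and the kernel launch syntax, where `stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(...)` becomes `hipLaunchKernelGGL(( stereoKernel<RADIUS>), dim3(grid), dim3(threads), smem_size, stream, ...)`. The sketch below isolates that launch-and-check pattern in plain CUDA; the kernel, buffer size, and launch geometry are illustrative only, not taken from the OpenCV file, with the hipify-generated form noted in comments.

```cpp
// Minimal CUDA sketch of the launch pattern used by kernel_caller<RADIUS> above.
// scaleKernel, n, and the launch geometry are hypothetical placeholders.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

int main()
{
    const int n = 1024;
    float* d_data = nullptr;
    cudaMalloc((void**)&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));

    dim3 threads(128, 1, 1);
    dim3 grid((n + threads.x - 1) / threads.x, 1, 1);

    // CUDA launch syntax; hipify rewrites this call to
    //   hipLaunchKernelGGL(( scaleKernel), dim3(grid), dim3(threads), 0, 0, d_data, 2.0f, n);
    scaleKernel<<<grid, threads, 0, 0>>>(d_data, 2.0f, n);

    cudaError_t err = cudaGetLastError();   // hipGetLastError() in the HIP version
    if (err != cudaSuccess)
        std::printf("launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();                // hipDeviceSynchronize() in the HIP version
    cudaFree(d_data);
    return 0;
}
```

Note also the comment inside `stereoKernel`: because `MinSSD` contains `__syncthreads()`, every thread in the block must reach the call even when its result is discarded, otherwise the block deadlocks (OpenCV issue #13850); per the same comment, `__syncwarp` was rejected there because the shared-memory access pattern would race.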
8276638f5b01128c1708045e959e94d4325811b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void inclusive_scan(const unsigned int *X, unsigned int *Y, int N)
{
extern __shared__ int XY[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
if(i<N)
{
XY[threadIdx.x] =X[i];
}
/*Note here stride <= threadIdx.x, means that everytime the threads with threadIdx.x less than
stride do not participate in loop*/
for(unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x]+= XY[threadIdx.x - stride];
}
/*This is executed by all threads, so that they store the final prefix sum to
corresponding locations in global memory*/
Y[i]=XY[threadIdx.x];
// wait until all threads of this block writes the output for all prefix sum within the block
__syncthreads();
if (threadIdx.x < blockIdx.x) //for 1st block onwards
{
//update the shared memory to keep prefix sum of last elements of previous block's
XY[threadIdx.x] = Y[threadIdx.x * blockDim.x + BLOCK_SIZE - 1];
}
__syncthreads();
for (int stride = 0; stride < blockIdx.x; stride++)
{ //add all previous las elements to this block elements
Y[threadIdx.x + blockDim.x * blockIdx.x] += XY[stride];
__syncthreads();
}
} | 8276638f5b01128c1708045e959e94d4325811b4.cu | #include "includes.h"
__global__ void inclusive_scan(const unsigned int *X, unsigned int *Y, int N)
{
extern __shared__ int XY[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
if(i<N)
{
XY[threadIdx.x] =X[i];
}
/*Note here stride <= threadIdx.x, means that everytime the threads with threadIdx.x less than
stride do not participate in loop*/
for(unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x]+= XY[threadIdx.x - stride];
}
/*This is executed by all threads, so that they store the final prefix sum to
corresponding locations in global memory*/
Y[i]=XY[threadIdx.x];
// wait until all threads of this block writes the output for all prefix sum within the block
__syncthreads();
if (threadIdx.x < blockIdx.x) //for 1st block onwards
{
//update the shared memory to keep prefix sum of last elements of previous block's
XY[threadIdx.x] = Y[threadIdx.x * blockDim.x + BLOCK_SIZE - 1];
}
__syncthreads();
for (int stride = 0; stride < blockIdx.x; stride++)
{ //add all previous las elements to this block elements
Y[threadIdx.x + blockDim.x * blockIdx.x] += XY[stride];
__syncthreads();
}
} |
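The `inclusive_scan` pair above uses a dynamically sized `extern __shared__ int XY[]`, so the caller has to supply the shared-memory size as the third launch parameter, and the kernel refers to a `BLOCK_SIZE` constant that presumably comes from `includes.h` (not shown). A hypothetical host-side launcher under those assumptions is sketched below; `run_inclusive_scan` and the value chosen for `BLOCK_SIZE` are illustrative, not part of the original file.

```cpp
// Hypothetical launcher for the inclusive_scan kernel shown above.
// Assumes the kernel definition is compiled into the same program.
#include <cuda_runtime.h>

#define BLOCK_SIZE 256  // stand-in for the value expected from "includes.h"

__global__ void inclusive_scan(const unsigned int *X, unsigned int *Y, int N);

void run_inclusive_scan(const unsigned int* d_in, unsigned int* d_out, int n)
{
    dim3 block(BLOCK_SIZE);
    dim3 grid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    size_t smem = BLOCK_SIZE * sizeof(int);   // backs the kernel's extern __shared__ int XY[]
    inclusive_scan<<<grid, block, smem>>>(d_in, d_out, n);
    cudaDeviceSynchronize();
}
```

Note that `__syncthreads()` only synchronizes threads within one block, so the second phase, which reads the last element of each earlier block back out of `Y`, implicitly assumes those blocks have already finished; CUDA does not guarantee that ordering across blocks.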
6e17dc514243e351055fc941123607a919d87f81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// numberCrunch.cu
// nestedDSMC
//
// Created by Christopher Watkins on 09/04/2015.
//
//
#include "numberCrunch.cuh"
#include "magneticField.cuh"
// Function prototypes
#define cudaCalloc(A, B, C) \
do { \
hipError_t __cudaCalloc_err = hipMalloc(A, (B)*C); \
if (__cudaCalloc_err == hipSuccess) hipMemset(*A, 0, (B)*C); \
} while (0)
__host__ double calculateKineticEnergy(double3 *d_vel,
int numberOfAtoms)
{
double *d_vel2;
cudaCalloc((void **)&d_vel2,
numberOfAtoms,
sizeof(double));
h_dot_prod(d_vel,
d_vel2,
numberOfAtoms);
thrust::device_ptr<double> th_vel2 = thrust::device_pointer_cast( d_vel2 );
double Ek = 0.5 * h_mRb * thrust::reduce(th_vel2,
th_vel2 + numberOfAtoms,
0.);
hipFree( d_vel2 );
return Ek;
}
__host__ double calculatePotentialEnergy(struct cudaGraphicsResource **cudaPBOres,
int numberOfAtoms)
{
double *d_absB;
cudaCalloc((void **)&d_absB,
numberOfAtoms,
sizeof(double));
double3* d_pos = mapCUDAVBOd3(cudaPBOres);
h_absB(d_pos,
d_absB,
numberOfAtoms);
thrust::device_ptr<double> th_absB = thrust::device_pointer_cast( d_absB );
double Ep = 0.5 * h_gs * h_muB * thrust::reduce(th_absB,
th_absB + numberOfAtoms,
0.);
hipFree( d_absB );
unmapCUDAVBO(cudaPBOres);
return Ep;
}
__host__ double calculateTemperature(double Ek,
int numberOfAtoms)
{
return 2. / 3. * Ek / h_kB / numberOfAtoms;
}
void h_dot_prod(double3 *d_in,
double *d_out,
int numberOfAtoms)
{
int blockSize;
int gridSize;
#ifdef CUDA7
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(const void *) d_dot_prod,
0,
numberOfAtoms );
gridSize = (numberOfAtoms + blockSize - 1) / blockSize;
#else
int device;
hipGetDevice ( &device );
int numSMs;
hipDeviceGetAttribute(&numSMs,
hipDeviceAttributeMultiprocessorCount,
device);
gridSize = 256*numSMs;
blockSize = NUM_THREADS;
#endif
hipLaunchKernelGGL(( d_dot_prod), dim3(gridSize),dim3(blockSize), 0, 0, d_in,
d_out,
numberOfAtoms);
return;
}
void h_absB(double3 *d_pos,
double *d_absB,
int numberOfAtoms)
{
int blockSize;
int gridSize;
#ifdef CUDA7
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(const void *) d_getAbsB,
0,
numberOfAtoms );
gridSize = (numberOfAtoms + blockSize - 1) / blockSize;
#else
int device;
hipGetDevice ( &device );
int numSMs;
hipDeviceGetAttribute(&numSMs,
hipDeviceAttributeMultiprocessorCount,
device);
gridSize = 256*numSMs;
blockSize = NUM_THREADS;
#endif
hipLaunchKernelGGL(( d_getAbsB), dim3(gridSize),dim3(blockSize), 0, 0, d_pos,
d_absB,
numberOfAtoms);
return;
}
__global__ void d_dot_prod(double3 *in,
double *out,
int numberOfAtoms)
{
for (int atom = blockIdx.x * blockDim.x + threadIdx.x;
atom < numberOfAtoms;
atom += blockDim.x * gridDim.x)
{
out[atom] = dot( in[atom], in[atom] );
}
return;
}
__global__ void d_getAbsB(double3 *pos,
double *d_absB,
int numberOfAtoms)
{
for (int atom = blockIdx.x * blockDim.x + threadIdx.x;
atom < numberOfAtoms;
atom += blockDim.x * gridDim.x)
{
d_absB[atom] = absB( pos[atom] );
}
return;
} | 6e17dc514243e351055fc941123607a919d87f81.cu | //
// numberCrunch.cu
// nestedDSMC
//
// Created by Christopher Watkins on 09/04/2015.
//
//
#include "numberCrunch.cuh"
#include "magneticField.cuh"
// Function prototypes
#define cudaCalloc(A, B, C) \
do { \
cudaError_t __cudaCalloc_err = cudaMalloc(A, (B)*C); \
if (__cudaCalloc_err == cudaSuccess) cudaMemset(*A, 0, (B)*C); \
} while (0)
__host__ double calculateKineticEnergy(double3 *d_vel,
int numberOfAtoms)
{
double *d_vel2;
cudaCalloc((void **)&d_vel2,
numberOfAtoms,
sizeof(double));
h_dot_prod(d_vel,
d_vel2,
numberOfAtoms);
thrust::device_ptr<double> th_vel2 = thrust::device_pointer_cast( d_vel2 );
double Ek = 0.5 * h_mRb * thrust::reduce(th_vel2,
th_vel2 + numberOfAtoms,
0.);
cudaFree( d_vel2 );
return Ek;
}
__host__ double calculatePotentialEnergy(struct cudaGraphicsResource **cudaPBOres,
int numberOfAtoms)
{
double *d_absB;
cudaCalloc((void **)&d_absB,
numberOfAtoms,
sizeof(double));
double3* d_pos = mapCUDAVBOd3(cudaPBOres);
h_absB(d_pos,
d_absB,
numberOfAtoms);
thrust::device_ptr<double> th_absB = thrust::device_pointer_cast( d_absB );
double Ep = 0.5 * h_gs * h_muB * thrust::reduce(th_absB,
th_absB + numberOfAtoms,
0.);
cudaFree( d_absB );
unmapCUDAVBO(cudaPBOres);
return Ep;
}
__host__ double calculateTemperature(double Ek,
int numberOfAtoms)
{
return 2. / 3. * Ek / h_kB / numberOfAtoms;
}
void h_dot_prod(double3 *d_in,
double *d_out,
int numberOfAtoms)
{
int blockSize;
int gridSize;
#ifdef CUDA7
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(const void *) d_dot_prod,
0,
numberOfAtoms );
gridSize = (numberOfAtoms + blockSize - 1) / blockSize;
#else
int device;
cudaGetDevice ( &device );
int numSMs;
cudaDeviceGetAttribute(&numSMs,
cudaDevAttrMultiProcessorCount,
device);
gridSize = 256*numSMs;
blockSize = NUM_THREADS;
#endif
d_dot_prod<<<gridSize,blockSize>>>(d_in,
d_out,
numberOfAtoms);
return;
}
void h_absB(double3 *d_pos,
double *d_absB,
int numberOfAtoms)
{
int blockSize;
int gridSize;
#ifdef CUDA7
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(const void *) d_getAbsB,
0,
numberOfAtoms );
gridSize = (numberOfAtoms + blockSize - 1) / blockSize;
#else
int device;
cudaGetDevice ( &device );
int numSMs;
cudaDeviceGetAttribute(&numSMs,
cudaDevAttrMultiProcessorCount,
device);
gridSize = 256*numSMs;
blockSize = NUM_THREADS;
#endif
d_getAbsB<<<gridSize,blockSize>>>(d_pos,
d_absB,
numberOfAtoms);
return;
}
__global__ void d_dot_prod(double3 *in,
double *out,
int numberOfAtoms)
{
for (int atom = blockIdx.x * blockDim.x + threadIdx.x;
atom < numberOfAtoms;
atom += blockDim.x * gridDim.x)
{
out[atom] = dot( in[atom], in[atom] );
}
return;
}
__global__ void d_getAbsB(double3 *pos,
double *d_absB,
int numberOfAtoms)
{
for (int atom = blockIdx.x * blockDim.x + threadIdx.x;
atom < numberOfAtoms;
atom += blockDim.x * gridDim.x)
{
d_absB[atom] = absB( pos[atom] );
}
return;
} |
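The `numberCrunch` pair above combines three recurring patterns: a `cudaCalloc` macro that zero-fills immediately after `cudaMalloc`/`hipMalloc`, a launch configuration that uses `cudaOccupancyMaxPotentialBlockSize` when built with `CUDA7` and otherwise sizes the grid from the multiprocessor count, and Thrust reductions driven through raw device pointers. The sketch below isolates the reduction pattern; the function name is hypothetical and the buffer is assumed to already live in device memory.

```cpp
// Standalone sketch of the pattern used by calculateKineticEnergy /
// calculatePotentialEnergy above: wrap a raw device pointer and reduce it
// with Thrust, which dispatches to the GPU because the iterators are device_ptrs.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>

double sum_on_device(const double* d_values, int n)
{
    thrust::device_ptr<const double> p = thrust::device_pointer_cast(d_values);
    return thrust::reduce(p, p + n, 0.0);
}
```

In the file itself the reduced sum is then scaled on the host, by `0.5 * h_mRb` for the kinetic energy and `0.5 * h_gs * h_muB` for the potential energy.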
0796ceb9275585aa0417599f77dc8f39da942d17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system
const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results
// the following constants are the 10th order method's coefficients
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;
// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;
// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;
#define cudaErrorCheck(call) { cudaAssert(call,__FILE__,__LINE__); }
void cudaAssert(const hipError_t err, const char *file, const int line)
{
if( hipSuccess != err) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",
file, line, hipGetErrorString(err) );
getchar();
exit(1);
}
}
//****************************************************************************
//Derivative kernel: takes pointers to x[], and f[] allocated on the device
__global__ void derKernel(double* device_x, double* device_f)
{
	//Each pair of consecutive elements in device_x holds one internal point (X1-X4), i.e. neqns = 2 values;
	//e.g. if the thread id is 0, the offset is 0*2 and the thread works on elements tx*2 and tx*2 + 1
int tx = threadIdx.x;
int xArrayNumber = tx *2;
device_f[xArrayNumber] = device_x[xArrayNumber+1];
__syncthreads();
device_f[xArrayNumber+1] = -device_x[xArrayNumber];
__syncthreads();
}
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
}
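// The four values written above are the straight-line (Euler) predictions
// x_k = x0 + a_k*h*f0 at the internal abscissae a1..a4, used as the starting
// guess for the fixed-point iterations performed below in rk1210().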
__global__ void order10Kernel(double*device_X_Total, double* device_X_Not, double *device_F_Not, double h, double *device_f)
{
int tx = threadIdx.x;
device_X_Total[tx]=device_X_Not[tx] + h*((b10 * device_F_Not[tx]) + (b11 * device_f[tx]) + (b12 * device_f[tx+2]) + (b13 * device_f[tx +4]) + (b14 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+2]=device_X_Not[tx] + h*((b20 * device_F_Not[tx]) + (b21 * device_f[tx]) + (b22 * device_f[tx+2]) + (b23 * device_f[tx +4]) + (b24 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+4]=device_X_Not[tx] + h*((b30 * device_F_Not[tx]) +( b31 * device_f[tx]) + (b32 * device_f[tx+2]) + (b33 * device_f[tx +4]) + (b34 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+6]=device_X_Not[tx] + h*((b40 * device_F_Not[tx]) + (b41 * device_f[tx]) +( b42 * device_f[tx+2]) + (b43 * device_f[tx +4]) +( b44 * device_f[tx +6]));
__syncthreads();
}
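// Each statement above evaluates one row of the update x_k = x0 + h*sum_j(b_kj*f_j)
// for the four internal points of the 10th order method; it is the parallel version
// of the commented-out CPU loops inside rk1210() below.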
__global__ void Order10FkKernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f)
{
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((g10*device_F_Not[tx])+ (g11 * device_f[tx]) + (g12 * device_f[tx+2])+ (g13 * device_f[tx + 4]) + (g14 * device_f[tx+ 6])+ (g15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((g20*device_F_Not[tx])+ (g21 * device_f[tx]) + (g22 * device_f[tx+2])+ (g23 * device_f[tx + 4]) + (g24 * device_f[tx+ 6])+ (g25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((g30*device_F_Not[tx])+ (g31 * device_f[tx]) + (g32 * device_f[tx+2])+ (g33 * device_f[tx + 4]) + (g34 * device_f[tx+ 6])+ (g35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((g40*device_F_Not[tx])+ (g41 * device_f[tx]) + (g42 * device_f[tx+2])+ (g43 * device_f[tx + 4]) + (g44 * device_f[tx+ 6])+ (g45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((g50*device_F_Not[tx])+ (g51 * device_f[tx]) + (g52 * device_f[tx+2])+ (g53 * device_f[tx + 4]) + (g54 * device_f[tx+ 6])+ (g55 *device_f[tx+8]));
__syncthreads();
}
__global__ void Order12Kernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f){
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((bh10*device_F_Not[tx])+ (bh11 * device_f[tx]) + (bh12 * device_f[tx+2])+ (bh13 * device_f[tx + 4]) + (bh14 * device_f[tx+ 6])+ (bh15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((bh20*device_F_Not[tx])+ (bh21 * device_f[tx]) + (bh22 * device_f[tx+2])+ (bh23 * device_f[tx + 4]) + (bh24 * device_f[tx+ 6])+ (bh25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((bh30*device_F_Not[tx])+ (bh31 * device_f[tx]) + (bh32 * device_f[tx+2])+ (bh33 * device_f[tx + 4]) + (bh34 * device_f[tx+ 6])+ (bh35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((bh40*device_F_Not[tx])+ (bh41 * device_f[tx]) + (bh42 * device_f[tx+2])+ (bh43 * device_f[tx + 4]) + (bh44 * device_f[tx+ 6])+ (bh45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((bh50*device_F_Not[tx])+ (bh51 * device_f[tx]) + (bh52 * device_f[tx+2])+ (bh53 * device_f[tx + 4]) + (bh54 * device_f[tx+ 6])+ (bh55 *device_f[tx+8]));
__syncthreads();
}
//****************************************************************************
// the following function describes the ordinary differential equations
//** The function is kept because the host code in rk1210() still makes
//** non-parallel calls to der() (for f0, f5 and f6).
void der(double t, double x[], double f[]) {
if (sho) {
f[0] = x[1];
f[1] = -x[0];
}
else {
f[0] = x[0] * (2.0 - x[1]);
f[1] = x[1] * (x[0] - 1.0);
}
}
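// For the sho case, with x[0](0) = 0 and x[1](0) = 1, the exact solution is
// x[0](t) = sin(t) and x[1](t) = cos(t); the error printouts in rk1210() below
// compare the numerical results against these.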
void rk1210() {
// Implicit Runge-Kutta of orders 12 and 10
double x0[MAXEQNS], x1[MAXEQNS], x2[MAXEQNS], x3[MAXEQNS], x4[MAXEQNS];
double x5[MAXEQNS], x6[MAXEQNS], xn10[MAXEQNS], xn12[MAXEQNS];
double t0, tf, h, hnew, est, esti, f0[MAXEQNS], f1[MAXEQNS], f2[MAXEQNS];
double f3[MAXEQNS], f4[MAXEQNS], f5[MAXEQNS], f6[MAXEQNS];
int iter;
bool finished = false; // becomes true when we have reached tf
if (sho) {
h = PI / 4.0; // initial guess for stepsize to use
x0[0] = 0.0; // initial value of first component
x0[1] = 1.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 2 * PI; // final t value, tf
}
else {
h = 1.0 / 2.0; // initial guess for stepsize to use
x0[0] = 2.0; // initial value of first component
x0[1] = 2.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 4.0; // final t value, tf
}
printf("Initial conditions are t0 = %8.5lf, x0[0] = %18.15lf, x0[1] = %18.15lf\n", t0, x0[0], x0[1]);
const int arraySize = 10; //there will be 8 elements being written from x1-x4 (Remaining 2 for when X5 is included);
int numOfXArrays =4;
double x_total[arraySize];
double f_total[arraySize];
while (!finished) { // keep going until we reach tf successfully
der(t0, x0, f0); // first, we will get 10th order results
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + a1*h*f0[i]; // just guess that solution is a straight line initially
// x2[i] = x0[i] + a2*h*f0[i]; // at the four internal points within the step
// x3[i] = x0[i] + a3*h*f0[i];
// x4[i] = x0[i] + a4*h*f0[i];
//}
//*************************************************************************************
double* device_x_total; double* device_x_not; double* device_f_not; //creating variables for the device
//allocating memory for device variables
hipMalloc((void**) &device_x_total, arraySize * sizeof(double));
hipMalloc((void**) &device_x_not, arraySize * sizeof(double));
hipMalloc((void**) &device_f_not, arraySize * sizeof(double));
//copying contents of x0 and f0 to the device variables
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize *sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( guessKernel), dim3(1), dim3(neqns), 0, 0, device_x_total, device_x_not, device_f_not, h);
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//****************************************************************************************
double* device_f; //creating variables for device;
//allocating memory for x[], and f[]
hipMalloc((void**) &device_x_total, arraySize* sizeof(double));
hipMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//*******Creating timers to test 4 arrays *********
/*hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);*/
//kernel call
hipLaunchKernelGGL(( derKernel), dim3(1),dim3(numOfXArrays), 0, 0, device_x_total, device_f);
/* hipEventRecord(stop);*/
//copying data from device to host
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
/*hipEventSynchronize(stop);
float milliseconds =0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Kernel took: %f milliseconds\n", milliseconds);
getchar();*/
//****************************************************************************************
hipMalloc((void**) &device_x_total, arraySize* sizeof(double));
hipMalloc((void**) &device_x_not, arraySize*sizeof(double));
hipMalloc((void**) &device_f_not, arraySize*sizeof(double));
hipMalloc((void**) &device_f, arraySize*sizeof(double));
for (iter = 0; iter<itermax10; iter++) { // now, we perform itermax10 iterations for the 10th order method
printf("iter = %d\n", iter);
////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
/*for (int i = 0; i<neqns; i++) x1[i] = x0[i] + h*(b10*f0[i] + b11*f1[i] + b12*f2[i] + b13*f3[i] + b14*f4[i]);
for (int i = 0; i<neqns; i++) x2[i] = x0[i] + h*(b20*f0[i] + b21*f1[i] + b22*f2[i] + b23*f3[i] + b24*f4[i]);
for (int i = 0; i<neqns; i++) x3[i] = x0[i] + h*(b30*f0[i] + b31*f1[i] + b32*f2[i] + b33*f3[i] + b34*f4[i]);
for (int i = 0; i<neqns; i++) x4[i] = x0[i] + h*(b40*f0[i] + b41*f1[i] + b42*f2[i] + b43*f3[i] + b44*f4[i]);*/
//*************Copying over f_total f0 and x0*******************************
hipMemcpy(device_f, f_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( order10Kernel), dim3(1),dim3(neqns), 0, 0, device_x_total,device_x_not,device_f_not, h,device_f);
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//**********************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//************************************************************************************
hipMalloc((void**) &device_x_total, arraySize* sizeof(double));
hipMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//kernel call
hipLaunchKernelGGL(( derKernel), dim3(1),dim3(numOfXArrays), 0, 0, device_x_total, device_f);
//writeback
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//*****************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
}
hipFree(device_x_total);
hipFree(device_x_not);
hipFree(device_f_not);
hipFree(device_f);
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
for (int i = 0; i<neqns; i++) x5[i] = x0[i] + h*(b50*f0[i] + b51*f1[i] + b52*f2[i] + b53*f3[i] + b54*f4[i]); // now get x5
der(t0 + a5*h, x5, f5); // and get the derivative there, f5
for (int i = 0; i<neqns; i++) {
xn10[i] = x0[i] + h*(c0*f0[i] + c1*f1[i] + c2*f2[i] + c3*f3[i] + c4*f4[i] + c5*f5[i]); // now compute final 10th order answer
}
if (sho) {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
itermax10, t0 + h, xn10[0], xn10[1], xn10[0] - sin(t0 + h), xn10[1] - cos(t0 + h));
}
else {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf\n",
itermax10, t0 + h, xn10[0], xn10[1]);
}
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + h*(g10*f0[i] + g11*f1[i] + g12*f2[i] + g13*f3[i] + g14*f4[i] + g15*f5[i]); // these fk's are from 10th order method,
// x2[i] = x0[i] + h*(g20*f0[i] + g21*f1[i] + g22*f2[i] + g23*f3[i] + g24*f4[i] + g25*f5[i]); // and note that they are being
// x3[i] = x0[i] + h*(g30*f0[i] + g31*f1[i] + g32*f2[i] + g33*f3[i] + g34*f4[i] + g35*f5[i]); // used to build the five internal values
// x4[i] = x0[i] + h*(g40*f0[i] + g41*f1[i] + g42*f2[i] + g43*f3[i] + g44*f4[i] + g45*f5[i]); // used to construct the 12th order xk's,
// x5[i] = x0[i] + h*(g50*f0[i] + g51*f1[i] + g52*f2[i] + g53*f3[i] + g54*f4[i] + g55*f5[i]); // so these xk's are for the 12th order method
//}
//***************************************************************************************************************************************************
//copying f arrays to a single array f_total
memcpy(f_total,f1, 2*sizeof(double));
memcpy(f_total+2,f2, 2*sizeof(double));
memcpy(f_total+4,f3, 2 *sizeof(double));
memcpy(f_total+6, f4, 2*sizeof(double));
memcpy(f_total+8, f5, 2*sizeof(double));
//allocating memory
hipMalloc((void**) &device_x_total, arraySize*sizeof(double));
hipMalloc((void**) &device_f, arraySize*sizeof(double));
hipMalloc((void**) &device_x_not, arraySize*sizeof(double));
hipMalloc((void**) &device_f_not, arraySize*sizeof(double));
//copying over f0, x0, and f_total
hipMemcpy(device_f,f_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize*sizeof(double), hipMemcpyHostToDevice);
//calling order10 kernel
hipLaunchKernelGGL(( Order10FkKernel), dim3(1), dim3(neqns), 0, 0, device_x_total, device_x_not, device_f_not, h, device_f);
//WriteBack of x_total
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//***************************************************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1);
//der(t0 + a2*h, x2, f2); // now we get the fk's to be used in the 12th order method
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4); // i.e., obtain derivatives at the five internal points needed for 12th order method
//der(t0 + a5*h, x5, f5);
//********************************************************************************************************************
numOfXArrays=5; //because we are passing in X1-X5
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//kernel call
hipLaunchKernelGGL(( derKernel), dim3(1),dim3(numOfXArrays), 0, 0, device_x_total, device_f);
//copying data from device to host
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//****************************************************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
for (iter = 0; iter<itermax12; iter++) { // now we can iterate to improve the values at the five internal points
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) { // each time, we recompute the internal xk values used in the 12th order method
// x1[i] = x0[i] + h*(bh10*f0[i] + bh11*f1[i] + bh12*f2[i] + bh13*f3[i] + bh14*f4[i] + bh15*f5[i]);
// x2[i] = x0[i] + h*(bh20*f0[i] + bh21*f1[i] + bh22*f2[i] + bh23*f3[i] + bh24*f4[i] + bh25*f5[i]);
// x3[i] = x0[i] + h*(bh30*f0[i] + bh31*f1[i] + bh32*f2[i] + bh33*f3[i] + bh34*f4[i] + bh35*f5[i]);
// x4[i] = x0[i] + h*(bh40*f0[i] + bh41*f1[i] + bh42*f2[i] + bh43*f3[i] + bh44*f4[i] + bh45*f5[i]);
// x5[i] = x0[i] + h*(bh50*f0[i] + bh51*f1[i] + bh52*f2[i] + bh53*f3[i] + bh54*f4[i] + bh55*f5[i]);
//}
hipMemcpy(device_f, f_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Order12Kernel), dim3(1),dim3(neqns), 0, 0, device_x_total, device_x_not, device_f_not, h, device_f);
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // once again, obtain derivatives at the five internal points of the 12th order method
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//der(t0 + a5*h, x5, f5);
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
//*******************************************************************************************************************
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//kernel call
hipLaunchKernelGGL(( derKernel), dim3(1),dim3(numOfXArrays), 0, 0, device_x_total, device_f);
//copying data from device to host
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
}
hipFree(device_x_total);
hipFree(device_f);
hipFree(device_x_not);
hipFree(device_f_not);
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
memcpy(f5, f_total +8, 2*sizeof(double));
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(x5, x_total +8, 2*sizeof(double));
for (int i = 0; i<neqns; i++) { // iteration complete, so now compute final base value for 12th order method
x6[i] = x0[i] + h*(bh60*f0[i] + bh61*f1[i] + bh62*f2[i] + bh63*f3[i] + bh64*f4[i] + bh65*f5[i]);
}
der(t0 + ah6*h, x6, f6); // and get the derivative there
for (int i = 0; i<neqns; i++) { // now, compute the final 12th order approximation to the solution at the end of the step
xn12[i] = x0[i] + h*(ch0*f0[i] + ch1*f1[i] + ch2*f2[i] + ch3*f3[i] + ch4*f4[i] + ch5*f5[i] + ch6*f6[i]); // now compute final 12th order answer
}
printf(" The estimates of the errors in the 10-th order method by differencing with 12-th order method results are %e and %e\n", xn10[0] - xn12[0], xn10[1] - xn12[1]);
if (sho) {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
iter, t0 + h, xn12[0], xn12[1], xn12[0] - sin(t0 + h), xn12[1] - cos(t0 + h));
}
else {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf\n",
iter, t0 + h, xn12[0], xn12[1]);
}
est = 1.0e-30;
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
esti = xn10[i] - xn12[i];
est = est + esti*esti;
}
est = sqrt(est); // sqrt of the sum of the squares of the errors in each component of the solution at t0 + h
hnew = h * pow(tol10 / est, 0.1);
if (est < tol) { // if error estimate is less than the error tolerance, then the step succeeded
printf("The step succeeded since est = %e was less than tol = %e\n\n", est, tol);
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
x0[i] = xn12[i];
}
t0 = t0 + h; // and update the independent variable
if (t0 / tf >= 0.99999999999999) finished = true; // and if we have reached the final value, tf, set finished to true
}
else {
printf("The step failed since est = %e was not less than tol = %e\n\n", est, tol);
}
h = hnew; // in any event, if not finished, we set the stepsize, h, to the new value, hnew
if ((t0 + h) > tf) h = tf - t0; // if new step takes us past final value, tf, reduce it to tf-t0
}
}
int main(int argc, char* argv[])
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
printf("Testing Implicit RK1210 ");
if (sho) {
printf(" for simple harmonic oscillator example problem \n\n");
}
else {
printf(" for predator - prey example problem \n\n");
}
rk1210();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds =0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Code took: %f milliseconds\n", milliseconds);
getchar();
return 0;
}
| 0796ceb9275585aa0417599f77dc8f39da942d17.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system
const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results
// the following constants are the 10th order method's coefficients
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;
// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;
// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;
#define cudaErrorCheck(call) { cudaAssert(call,__FILE__,__LINE__); }
void cudaAssert(const cudaError err, const char *file, const int line)
{
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",
file, line, cudaGetErrorString(err) );
getchar();
exit(1);
}
}
//****************************************************************************
//Derivative kernel: takes pointers to x[], and f[] allocated on the device
__global__ void derKernel(double* device_x, double* device_f)
{
	//Each pair of consecutive elements in device_x holds one internal point (X1-X4), i.e. neqns = 2 values;
	//e.g. if the thread id is 0, the offset is 0*2 and the thread works on elements tx*2 and tx*2 + 1
int tx = threadIdx.x;
int xArrayNumber = tx *2;
device_f[xArrayNumber] = device_x[xArrayNumber+1];
__syncthreads();
device_f[xArrayNumber+1] = -device_x[xArrayNumber];
__syncthreads();
}
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
}
__global__ void order10Kernel(double*device_X_Total, double* device_X_Not, double *device_F_Not, double h, double *device_f)
{
int tx = threadIdx.x;
device_X_Total[tx]=device_X_Not[tx] + h*((b10 * device_F_Not[tx]) + (b11 * device_f[tx]) + (b12 * device_f[tx+2]) + (b13 * device_f[tx +4]) + (b14 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+2]=device_X_Not[tx] + h*((b20 * device_F_Not[tx]) + (b21 * device_f[tx]) + (b22 * device_f[tx+2]) + (b23 * device_f[tx +4]) + (b24 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+4]=device_X_Not[tx] + h*((b30 * device_F_Not[tx]) +( b31 * device_f[tx]) + (b32 * device_f[tx+2]) + (b33 * device_f[tx +4]) + (b34 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+6]=device_X_Not[tx] + h*((b40 * device_F_Not[tx]) + (b41 * device_f[tx]) +( b42 * device_f[tx+2]) + (b43 * device_f[tx +4]) +( b44 * device_f[tx +6]));
__syncthreads();
}
__global__ void Order10FkKernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f)
{
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((g10*device_F_Not[tx])+ (g11 * device_f[tx]) + (g12 * device_f[tx+2])+ (g13 * device_f[tx + 4]) + (g14 * device_f[tx+ 6])+ (g15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((g20*device_F_Not[tx])+ (g21 * device_f[tx]) + (g22 * device_f[tx+2])+ (g23 * device_f[tx + 4]) + (g24 * device_f[tx+ 6])+ (g25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((g30*device_F_Not[tx])+ (g31 * device_f[tx]) + (g32 * device_f[tx+2])+ (g33 * device_f[tx + 4]) + (g34 * device_f[tx+ 6])+ (g35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((g40*device_F_Not[tx])+ (g41 * device_f[tx]) + (g42 * device_f[tx+2])+ (g43 * device_f[tx + 4]) + (g44 * device_f[tx+ 6])+ (g45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((g50*device_F_Not[tx])+ (g51 * device_f[tx]) + (g52 * device_f[tx+2])+ (g53 * device_f[tx + 4]) + (g54 * device_f[tx+ 6])+ (g55 *device_f[tx+8]));
__syncthreads();
}
__global__ void Order12Kernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f){
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((bh10*device_F_Not[tx])+ (bh11 * device_f[tx]) + (bh12 * device_f[tx+2])+ (bh13 * device_f[tx + 4]) + (bh14 * device_f[tx+ 6])+ (bh15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((bh20*device_F_Not[tx])+ (bh21 * device_f[tx]) + (bh22 * device_f[tx+2])+ (bh23 * device_f[tx + 4]) + (bh24 * device_f[tx+ 6])+ (bh25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((bh30*device_F_Not[tx])+ (bh31 * device_f[tx]) + (bh32 * device_f[tx+2])+ (bh33 * device_f[tx + 4]) + (bh34 * device_f[tx+ 6])+ (bh35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((bh40*device_F_Not[tx])+ (bh41 * device_f[tx]) + (bh42 * device_f[tx+2])+ (bh43 * device_f[tx + 4]) + (bh44 * device_f[tx+ 6])+ (bh45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((bh50*device_F_Not[tx])+ (bh51 * device_f[tx]) + (bh52 * device_f[tx+2])+ (bh53 * device_f[tx + 4]) + (bh54 * device_f[tx+ 6])+ (bh55 *device_f[tx+8]));
__syncthreads();
}
//****************************************************************************
// the following function describes the ordinary differential equations
//** The function is kept because the host code in rk1210() still makes
//** non-parallel calls to der() (for f0, f5 and f6).
void der(double t, double x[], double f[]) {
if (sho) {
f[0] = x[1];
f[1] = -x[0];
}
else {
f[0] = x[0] * (2.0 - x[1]);
f[1] = x[1] * (x[0] - 1.0);
}
}
void rk1210() {
// Implicit Runge-Kutta of orders 12 and 10
double x0[MAXEQNS], x1[MAXEQNS], x2[MAXEQNS], x3[MAXEQNS], x4[MAXEQNS];
double x5[MAXEQNS], x6[MAXEQNS], xn10[MAXEQNS], xn12[MAXEQNS];
double t0, tf, h, hnew, est, esti, f0[MAXEQNS], f1[MAXEQNS], f2[MAXEQNS];
double f3[MAXEQNS], f4[MAXEQNS], f5[MAXEQNS], f6[MAXEQNS];
int iter;
bool finished = false; // becomes true when we have reached tf
if (sho) {
h = PI / 4.0; // initial guess for stepsize to use
x0[0] = 0.0; // initial value of first component
x0[1] = 1.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 2 * PI; // final t value, tf
}
else {
h = 1.0 / 2.0; // initial guess for stepsize to use
x0[0] = 2.0; // initial value of first component
x0[1] = 2.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 4.0; // final t value, tf
}
printf("Initial conditions are t0 = %8.5lf, x0[0] = %18.15lf, x0[1] = %18.15lf\n", t0, x0[0], x0[1]);
const int arraySize = 10; //there will be 8 elements being written from x1-x4 (Remaining 2 for when X5 is included);
int numOfXArrays =4;
double x_total[arraySize];
double f_total[arraySize];
while (!finished) { // keep going until we reach tf successfully
der(t0, x0, f0); // first, we will get 10th order results
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + a1*h*f0[i]; // just guess that solution is a straight line initially
// x2[i] = x0[i] + a2*h*f0[i]; // at the four internal points within the step
// x3[i] = x0[i] + a3*h*f0[i];
// x4[i] = x0[i] + a4*h*f0[i];
//}
//*************************************************************************************
double* device_x_total; double* device_x_not; double* device_f_not; //creating variables for the device
//allocating memory for device variables
cudaMalloc((void**) &device_x_total, arraySize * sizeof(double));
cudaMalloc((void**) &device_x_not, arraySize * sizeof(double));
cudaMalloc((void**) &device_f_not, arraySize * sizeof(double));
//copying contents of x0 and f0 to the device variables
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize *sizeof(double), cudaMemcpyHostToDevice);
guessKernel<<<1, neqns>>>(device_x_total, device_x_not, device_f_not, h);
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//****************************************************************************************
double* device_f; //creating variables for device;
//allocating memory for x[], and f[]
cudaMalloc((void**) &device_x_total, arraySize* sizeof(double));
cudaMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//*******Creating timers to test 4 arrays *********
/*cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);*/
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
/* cudaEventRecord(stop);*/
//copying data from device to host
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
/*cudaEventSynchronize(stop);
float milliseconds =0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Kernel took: %f milliseconds\n", milliseconds);
getchar();*/
//****************************************************************************************
cudaMalloc((void**) &device_x_total, arraySize* sizeof(double));
cudaMalloc((void**) &device_x_not, arraySize*sizeof(double));
cudaMalloc((void**) &device_f_not, arraySize*sizeof(double));
cudaMalloc((void**) &device_f, arraySize*sizeof(double));
for (iter = 0; iter<itermax10; iter++) { // now, we perform itermax10 iterations for the 10th order method
printf("iter = %d\n", iter);
////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
/*for (int i = 0; i<neqns; i++) x1[i] = x0[i] + h*(b10*f0[i] + b11*f1[i] + b12*f2[i] + b13*f3[i] + b14*f4[i]);
for (int i = 0; i<neqns; i++) x2[i] = x0[i] + h*(b20*f0[i] + b21*f1[i] + b22*f2[i] + b23*f3[i] + b24*f4[i]);
for (int i = 0; i<neqns; i++) x3[i] = x0[i] + h*(b30*f0[i] + b31*f1[i] + b32*f2[i] + b33*f3[i] + b34*f4[i]);
for (int i = 0; i<neqns; i++) x4[i] = x0[i] + h*(b40*f0[i] + b41*f1[i] + b42*f2[i] + b43*f3[i] + b44*f4[i]);*/
//*************Copying over f_total f0 and x0*******************************
cudaMemcpy(device_f, f_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
order10Kernel<<<1,neqns>>>(device_x_total,device_x_not,device_f_not, h,device_f);
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//**********************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//************************************************************************************
cudaMalloc((void**) &device_x_total, arraySize* sizeof(double));
cudaMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//writeback
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//*****************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
}
cudaFree(device_x_total);
cudaFree(device_x_not);
cudaFree(device_f_not);
cudaFree(device_f);
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
for (int i = 0; i<neqns; i++) x5[i] = x0[i] + h*(b50*f0[i] + b51*f1[i] + b52*f2[i] + b53*f3[i] + b54*f4[i]); // now get x5
der(t0 + a5*h, x5, f5); // and get the derivative there, f5
for (int i = 0; i<neqns; i++) {
xn10[i] = x0[i] + h*(c0*f0[i] + c1*f1[i] + c2*f2[i] + c3*f3[i] + c4*f4[i] + c5*f5[i]); // now compute final 10th order answer
}
if (sho) {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
itermax10, t0 + h, xn10[0], xn10[1], xn10[0] - sin(t0 + h), xn10[1] - cos(t0 + h));
}
else {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf\n",
itermax10, t0 + h, xn10[0], xn10[1]);
}
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + h*(g10*f0[i] + g11*f1[i] + g12*f2[i] + g13*f3[i] + g14*f4[i] + g15*f5[i]); // these fk's are from 10th order method,
// x2[i] = x0[i] + h*(g20*f0[i] + g21*f1[i] + g22*f2[i] + g23*f3[i] + g24*f4[i] + g25*f5[i]); // and note that they are being
// x3[i] = x0[i] + h*(g30*f0[i] + g31*f1[i] + g32*f2[i] + g33*f3[i] + g34*f4[i] + g35*f5[i]); // used to build the five internal values
// x4[i] = x0[i] + h*(g40*f0[i] + g41*f1[i] + g42*f2[i] + g43*f3[i] + g44*f4[i] + g45*f5[i]); // used to construct the 12th order xk's,
// x5[i] = x0[i] + h*(g50*f0[i] + g51*f1[i] + g52*f2[i] + g53*f3[i] + g54*f4[i] + g55*f5[i]); // so these xk's are for the 12th order method
//}
//***************************************************************************************************************************************************
//copying f arrays to a single array f_total
memcpy(f_total,f1, 2*sizeof(double));
memcpy(f_total+2,f2, 2*sizeof(double));
memcpy(f_total+4,f3, 2 *sizeof(double));
memcpy(f_total+6, f4, 2*sizeof(double));
memcpy(f_total+8, f5, 2*sizeof(double));
//allocating memory
cudaMalloc((void**) &device_x_total, arraySize*sizeof(double));
cudaMalloc((void**) &device_f, arraySize*sizeof(double));
cudaMalloc((void**) &device_x_not, arraySize*sizeof(double));
cudaMalloc((void**) &device_f_not, arraySize*sizeof(double));
//copying over f0, x0, and f_total
cudaMemcpy(device_f,f_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//calling order10 kernel
Order10FkKernel<<<1, neqns>>>(device_x_total, device_x_not, device_f_not, h, device_f);
//WriteBack of x_total
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//***************************************************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1);
//der(t0 + a2*h, x2, f2); // now we get the fk's to be used in the 12th order method
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4); // i.e., obtain derivatives at the five internal points needed for 12th order method
//der(t0 + a5*h, x5, f5);
//********************************************************************************************************************
numOfXArrays=5; //because we are passing in X1-X5
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//copying data from device to host
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//****************************************************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
for (iter = 0; iter<itermax12; iter++) { // now we can iterate to improve the values at the five internal points
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) { // each time, we recompute the internal xk values used in the 12th order method
// x1[i] = x0[i] + h*(bh10*f0[i] + bh11*f1[i] + bh12*f2[i] + bh13*f3[i] + bh14*f4[i] + bh15*f5[i]);
// x2[i] = x0[i] + h*(bh20*f0[i] + bh21*f1[i] + bh22*f2[i] + bh23*f3[i] + bh24*f4[i] + bh25*f5[i]);
// x3[i] = x0[i] + h*(bh30*f0[i] + bh31*f1[i] + bh32*f2[i] + bh33*f3[i] + bh34*f4[i] + bh35*f5[i]);
// x4[i] = x0[i] + h*(bh40*f0[i] + bh41*f1[i] + bh42*f2[i] + bh43*f3[i] + bh44*f4[i] + bh45*f5[i]);
// x5[i] = x0[i] + h*(bh50*f0[i] + bh51*f1[i] + bh52*f2[i] + bh53*f3[i] + bh54*f4[i] + bh55*f5[i]);
//}
cudaMemcpy(device_f, f_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
Order12Kernel<<<1,neqns>>>(device_x_total, device_x_not, device_f_not, h, device_f);
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // once again, obtain derivatives at the five internal points of the 12th order method
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//der(t0 + a5*h, x5, f5);
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
//*******************************************************************************************************************
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//copying data from device to host
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
}
cudaFree(device_x_total);
cudaFree(device_f);
cudaFree(device_x_not);
cudaFree(device_f_not);
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
memcpy(f5, f_total +8, 2*sizeof(double));
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(x5, x_total +8, 2*sizeof(double));
for (int i = 0; i<neqns; i++) { // iteration complete, so now compute final base value for 12th order method
x6[i] = x0[i] + h*(bh60*f0[i] + bh61*f1[i] + bh62*f2[i] + bh63*f3[i] + bh64*f4[i] + bh65*f5[i]);
}
der(t0 + ah6*h, x6, f6); // and get the derivative there
for (int i = 0; i<neqns; i++) { // now, compute the final 12th order approximation to the solution at the end of the step
xn12[i] = x0[i] + h*(ch0*f0[i] + ch1*f1[i] + ch2*f2[i] + ch3*f3[i] + ch4*f4[i] + ch5*f5[i] + ch6*f6[i]); // now compute final 12th order answer
}
printf(" The estimates of the errors in the 10-th order method by differencing with 12-th order method results are %e and %e\n", xn10[0] - xn12[0], xn10[1] - xn12[1]);
if (sho) {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
iter, t0 + h, xn12[0], xn12[1], xn12[0] - sin(t0 + h), xn12[1] - cos(t0 + h));
}
else {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf\n",
iter, t0 + h, xn12[0], xn12[1]);
}
est = 1.0e-30;
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
esti = xn10[i] - xn12[i];
est = est + esti*esti;
}
est = sqrt(est); // sqrt of the sum of the squares of the errors in each component of the solution at t0 + h
hnew = h * pow(tol10 / est, 0.1);
if (est < tol) { // if error estimate is less than the error tolerance, then the step succeeded
printf("The step succeeded since est = %e was less than tol = %e\n\n", est, tol);
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
x0[i] = xn12[i];
}
t0 = t0 + h; // and update the independent variable
if (t0 / tf >= 0.99999999999999) finished = true; // and if we have reached the final value, tf, set finished to true
}
else {
printf("The step failed since est = %e was not less than tol = %e\n\n", est, tol);
}
h = hnew; // in any event, if not finished, we set the stepsize, h, to the new value, hnew
if ((t0 + h) > tf) h = tf - t0; // if new step takes us past final value, tf, reduce it to tf-t0
}
}
int main(int argc, char* argv[])
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
printf("Testing Implicit RK1210 ");
if (sho) {
printf(" for simple harmonic oscillator example problem \n\n");
}
else {
printf(" for predator - prey example problem \n\n");
}
rk1210();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds =0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Code took: %f milliseconds\n", milliseconds);
getchar();
return 0;
}
|
f81d986dfcbd9d3bd6b861f89ae85b471003ee8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <limits>
#include <vector>
#include "modules/perception/inference/tensorrt/plugins/argmax_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
__global__ void cmp(const int nthreads, const float *in_data,
const int channels, const int height, const int width,
const bool out_max_val, float *out_data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nthreads) {
int w = idx % width;
idx = idx / width;
int h = idx % height;
idx = idx / height;
int c = idx % channels;
int n = idx / channels;
if (c != 0) {
return;
}
int c_max = 0;
    // start from the most negative float so that all-negative inputs still yield the right argmax
    float v_max = std::numeric_limits<float>::lowest();
for (int c = 0; c < channels; c++) {
int in_idx = ((n * channels + c) * height + h) * width + w;
if (v_max < in_data[in_idx]) {
v_max = in_data[in_idx];
c_max = c;
}
}
int out_idx_idx = ((n * channels + 0) * height + h) * width + w;
out_data[out_idx_idx] = c_max;
if (out_max_val) {
int out_val_idx = ((n * channels + 1) * height + h) * width + w;
out_data[out_val_idx] = v_max;
}
}
}
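// A minimal CPU reference sketch (an illustration added for clarity, not part of the
// Apollo plugin): it mirrors what the cmp kernel computes for a single batch element,
// writing the argmax channel index into channel 0 and, when out_max_val is true, the
// maximum value into channel 1. The NCHW float layout matches the kernel's indexing.
void argmax_cpu_reference(const float *in, const int channels, const int height,
                          const int width, const bool out_max_val, float *out) {
  for (int h = 0; h < height; ++h) {
    for (int w = 0; w < width; ++w) {
      int c_max = 0;
      float v_max = in[(0 * height + h) * width + w];
      for (int c = 1; c < channels; ++c) {
        const float v = in[(c * height + h) * width + w];
        if (v > v_max) {
          v_max = v;
          c_max = c;
        }
      }
      out[(0 * height + h) * width + w] = static_cast<float>(c_max);
      if (out_max_val) {
        out[(1 * height + h) * width + w] = v_max;
      }
    }
  }
}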
int ArgMax1Plugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace,
hipStream_t stream) {
const int thread_size = 512;
int block_size =
(input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize +
thread_size - 1) /
thread_size;
hipLaunchKernelGGL(( cmp), dim3(block_size), dim3(thread_size), 0, 0,
input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize,
(const float *)inputs[0], input_dims_.d[0], input_dims_.d[1],
input_dims_.d[2], out_max_val_, reinterpret_cast<float *>(outputs[0]));
return 0;
}
} // namespace inference
} // namespace perception
} // namespace apollo
| f81d986dfcbd9d3bd6b861f89ae85b471003ee8a.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <limits>
#include <vector>
#include "modules/perception/inference/tensorrt/plugins/argmax_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
__global__ void cmp(const int nthreads, const float *in_data,
const int channels, const int height, const int width,
const bool out_max_val, float *out_data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nthreads) {
int w = idx % width;
idx = idx / width;
int h = idx % height;
idx = idx / height;
int c = idx % channels;
int n = idx / channels;
if (c != 0) {
return;
}
int c_max = 0;
    // start from the most negative float so that all-negative inputs still yield the right argmax
    float v_max = std::numeric_limits<float>::lowest();
for (int c = 0; c < channels; c++) {
int in_idx = ((n * channels + c) * height + h) * width + w;
if (v_max < in_data[in_idx]) {
v_max = in_data[in_idx];
c_max = c;
}
}
int out_idx_idx = ((n * channels + 0) * height + h) * width + w;
out_data[out_idx_idx] = c_max;
if (out_max_val) {
int out_val_idx = ((n * channels + 1) * height + h) * width + w;
out_data[out_val_idx] = v_max;
}
}
}
int ArgMax1Plugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace,
cudaStream_t stream) {
const int thread_size = 512;
int block_size =
(input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize +
thread_size - 1) /
thread_size;
cmp<<<block_size, thread_size>>>(
input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize,
(const float *)inputs[0], input_dims_.d[0], input_dims_.d[1],
input_dims_.d[2], out_max_val_, reinterpret_cast<float *>(outputs[0]));
return 0;
}
} // namespace inference
} // namespace perception
} // namespace apollo
|
00ff5b7d90ef2f179baa1e4a8d62751157460ae6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "CImg.h"
#include <vector>
#include <iostream>
#include <chrono>
using namespace cimg_library;
#define BLOCK_SIZEX 16
#define BLOCK_SIZEY 16
// Biggest picture will be (65535*BLOCK_SIZEX, 65535*BLOCK_SIZEY)
// If we increase BLOCK_SIZE we can get even bigger pictures. (Block_size = 32)
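// For example, with 16x16 blocks the grid can cover at most 65535*16 = 1048560 pixels
// per dimension; with 32x32 blocks that limit grows to 65535*32 = 2097120 pixels.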
struct rgb
{
unsigned char r;
unsigned char g;
unsigned char b;
};
int matrixAt(int x, int y, unsigned int width)
{
return x + y * width;
}
__device__ int matrixAt_d(int x, int y, unsigned int width)
{
return x + y * width;
}
__global__ void dataTransformation(rgb* AoS_d, unsigned char* SoA_d, unsigned int width, unsigned int height, unsigned int color)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
// Don't go out of memory
if (idx < width && idy < height)
{
// Causes no thread divergence since it is only called with a constant color
if (color == 0)
{
SoA_d[matrixAt_d(idx, idy, width)] = AoS_d[matrixAt_d(idx, idy, width)].r;
}
else if (color == 1)
{
SoA_d[matrixAt_d(idx, idy, width)] = AoS_d[matrixAt_d(idx, idy, width)].g;
}
else if (color == 2)
{
SoA_d[matrixAt_d(idx, idy, width)] = AoS_d[matrixAt_d(idx, idy, width)].b;
}
}
}
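// Rationale for the AoS -> SoA split above: each colour channel becomes one contiguous
// plane, so neighbouring threads in the blur kernel below can read neighbouring bytes
// (coalesced accesses) instead of striding over interleaved rgb structs.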
__global__ void ImageBlur(unsigned char* SoA_d, unsigned char* SoA_blur, double* mask_d, unsigned int width, unsigned int height)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
double tempSum = 0;
// Do not go outside the memory
if (idx < width && idy < height)
{
for (int x = -2; x <= 2; x++)
{
for (int y = -2; y <= 2; y++)
{
// Do not sum values from outside the picture
if (idx + x >= 0 && idx + x < width && idy + y >= 0 && idy + y < height)
{
tempSum += mask_d[matrixAt_d(x + 2, y + 2, 5)] * (double)SoA_d[matrixAt_d(idx + x, idy + y, width)];
}
}
}
// Clamp the value to 0-255 and write only for threads that map to a pixel inside the image
if (tempSum > 255) tempSum = 255;
else if (tempSum < 0) tempSum = 0;
SoA_blur[matrixAt_d(idx, idy, width)] = (unsigned char)tempSum;
}
}
void showImage(unsigned char* SoA[3], CImg<unsigned char>& image)
{
// For showing the picture with Cimg
for (int x = 0; x < image._width; x++)
{
for (int y = 0; y < image._height; y++)
{
image(x, y, 0, 0) = SoA[0][matrixAt(x, y, image._width)];
image(x, y, 0, 1) = SoA[1][matrixAt(x, y, image._width)];
image(x, y, 0, 2) = SoA[2][matrixAt(x, y, image._width)];
}
}
CImgDisplay main_disp(image, "GPU - Blurred image");
while (1)
{
main_disp.wait();
}
}
int main()
{
CImg<unsigned char> image("cake.ppm"), image_cpu("cake.ppm");
const int size = image._width * image._height;
// Array of Struct
rgb* rgbStruct;
rgbStruct = new rgb[size];
// "Struct of Array"
unsigned char* SoA[3];
SoA[0] = new unsigned char[size];
SoA[1] = new unsigned char[size];
SoA[2] = new unsigned char[size];
for (int x = 0; x < image._width; x++)
{
for (int y = 0; y < image._height; y++)
{
// Put the rgb values into the rgb struct array
rgbStruct[matrixAt(x, y, image._width)].r = image(x, y, 0, 0);
rgbStruct[matrixAt(x, y, image._width)].g = image(x, y, 0, 1);
rgbStruct[matrixAt(x, y, image._width)].b = image(x, y, 0, 2);
}
}
// Declare device variables
rgb* AoS_d = nullptr;
unsigned char* SoA_d[3];
SoA_d[0] = nullptr;
SoA_d[1] = nullptr;
SoA_d[2] = nullptr;
// Allocate memory on device
hipMalloc((void**)&AoS_d, sizeof(rgb)*size);
hipMalloc((void**)&SoA_d[0], size);
hipMalloc((void**)&SoA_d[1], size);
hipMalloc((void**)&SoA_d[2], size);
// Send over the Array of Structure
hipMemcpy(AoS_d, rgbStruct, size*sizeof(rgb), hipMemcpyHostToDevice);
// Create a grid with correct amount of threads and blocks
dim3 numBlocks(ceil((float)image._width / (float)BLOCK_SIZEX), ceil((float)image._height / (float)BLOCK_SIZEY));
dim3 blockSize(BLOCK_SIZEX, BLOCK_SIZEY);
// Kernel call to swap array of structure to structure of arrays
dataTransformation << <numBlocks, blockSize >> > (AoS_d, SoA_d[0], image._width, image._height, 0); // R
dataTransformation << <numBlocks, blockSize >> > (AoS_d, SoA_d[1], image._width, image._height, 1); // G
dataTransformation << <numBlocks, blockSize >> > (AoS_d, SoA_d[2], image._width, image._height, 2); // B
// Send back the result to CPU
hipMemcpy(SoA[0], SoA_d[0], size, hipMemcpyDeviceToHost);
hipMemcpy(SoA[1], SoA_d[1], size, hipMemcpyDeviceToHost);
hipMemcpy(SoA[2], SoA_d[2], size, hipMemcpyDeviceToHost);
// Variable for blurred channel
unsigned char* SoA_blur = nullptr;
// Set up the mask
double* mask_d = nullptr;
double mask[5*5];
mask[matrixAt(0, 1, 5)] = mask[matrixAt(0, 3, 5)] = mask[matrixAt(1, 0, 5)] = mask[matrixAt(1, 4, 5)] =
mask[matrixAt(3, 0, 5)] = mask[matrixAt(3, 4, 5)] = mask[matrixAt(4, 1, 5)] = mask[matrixAt(4, 3, 5)] = 4.0 / 256.0;
mask[matrixAt(0, 0, 5)] = mask[matrixAt(0, 4, 5)] = mask[matrixAt(4, 0, 5)] = mask[matrixAt(4, 4, 5)] = 1.0 / 256.0;
mask[matrixAt(0, 2, 5)] = mask[matrixAt(2, 0, 5)] = mask[matrixAt(2, 4, 5)] = mask[matrixAt(4, 2, 5)] = 6.0 / 256.0;
mask[matrixAt(1, 1, 5)] = mask[matrixAt(1, 3, 5)] = mask[matrixAt(3, 1, 5)] = mask[matrixAt(3, 3, 5)] = 16.0 / 256.0;
mask[matrixAt(1, 2, 5)] = mask[matrixAt(2, 1, 5)] = mask[matrixAt(2, 3, 5)] = mask[matrixAt(3, 2, 5)] = 24.0 / 256.0;
mask[matrixAt(2, 2, 5)] = 36.0 / 256.0;
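// The assignments above spell out the classic 5x5 binomial (Gaussian) kernel,
// i.e. the outer product of [1 4 6 4 1] with itself, divided by 256:
// | 1 4 6 4 1 |
// | 4 16 24 16 4 |
// | 6 24 36 24 6 | * (1/256)
// | 4 16 24 16 4 |
// | 1 4 6 4 1 |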
// Allocate memory
hipMalloc((void**)&SoA_blur, size);
hipMalloc((void**)&mask_d, sizeof(double) * 5 * 5);
hipMemcpy(mask_d, mask, sizeof(double) * 5 * 5, hipMemcpyHostToDevice);
// Kernel call to gauss blur for each channel
ImageBlur << <numBlocks, blockSize >> > (SoA_d[0], SoA_blur, mask_d, image._width, image._height); // R
hipMemcpy(SoA[0], SoA_blur, size, hipMemcpyDeviceToHost);
ImageBlur << <numBlocks, blockSize >> > (SoA_d[1], SoA_blur, mask_d, image._width, image._height); // G
hipMemcpy(SoA[1], SoA_blur, size, hipMemcpyDeviceToHost);
ImageBlur << <numBlocks, blockSize >> > (SoA_d[2], SoA_blur, mask_d, image._width, image._height); // B
hipMemcpy(SoA[2], SoA_blur, size, hipMemcpyDeviceToHost);
showImage(SoA, image);
/*
// Test
for (int x = 0; x < image._width; x++)
{
for (int y = 0; y < image._height; y++)
{
if (SoA[0][matrixAt(x, y, image._width)] != image(x, y, 0, 0) &&
SoA[1][matrixAt(x, y, image._width)] != image(x, y, 0, 1) &&
SoA[2][matrixAt(x, y, image._width)] != image(x, y, 0, 2))
printf("ErroR: @ (%d,%d) :: (%d, %d, %d)\n", x, y, SoA[0][matrixAt(x, y, image._width)], SoA[1][matrixAt(x, y, image._width)], SoA[2][matrixAt(x, y, image._width)]);
}
}
*/
return 0;
}
| 00ff5b7d90ef2f179baa1e4a8d62751157460ae6.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "CImg.h"
#include <vector>
#include <iostream>
#include <chrono>
using namespace cimg_library;
#define BLOCK_SIZEX 16
#define BLOCK_SIZEY 16
// Biggest picture will be (65535*BLOCK_SIZEX, 65535*BLOCK_SIZEY)
// If we increase BLOCK_SIZE we can get even bigger pictures. (Block_size = 32)
struct rgb
{
unsigned char r;
unsigned char g;
unsigned char b;
};
int matrixAt(int x, int y, unsigned int width)
{
return x + y * width;
}
__device__ int matrixAt_d(int x, int y, unsigned int width)
{
return x + y * width;
}
__global__ void dataTransformation(rgb* AoS_d, unsigned char* SoA_d, unsigned int width, unsigned int height, unsigned int color)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
// Don't go out of memory
if (idx < width && idy < height)
{
// Causes no thread divergence since it is only called with a constant color
if (color == 0)
{
SoA_d[matrixAt_d(idx, idy, width)] = AoS_d[matrixAt_d(idx, idy, width)].r;
}
else if (color == 1)
{
SoA_d[matrixAt_d(idx, idy, width)] = AoS_d[matrixAt_d(idx, idy, width)].g;
}
else if (color == 2)
{
SoA_d[matrixAt_d(idx, idy, width)] = AoS_d[matrixAt_d(idx, idy, width)].b;
}
}
}
__global__ void ImageBlur(unsigned char* SoA_d, unsigned char* SoA_blur, double* mask_d, unsigned int width, unsigned int height)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
double tempSum = 0;
// Do not go outside the memory
if (idx < width && idy < height)
{
for (int x = -2; x <= 2; x++)
{
for (int y = -2; y <= 2; y++)
{
// Do not sum values from outside the picture
if (idx + x >= 0 && idx + x < width && idy + y >= 0 && idy + y < height)
{
tempSum += mask_d[matrixAt_d(x + 2, y + 2, 5)] * (double)SoA_d[matrixAt_d(idx + x, idy + y, width)];
}
}
}
// Clamp the value to 0-255 and write only for threads that map to a pixel inside the image
if (tempSum > 255) tempSum = 255;
else if (tempSum < 0) tempSum = 0;
SoA_blur[matrixAt_d(idx, idy, width)] = (unsigned char)tempSum;
}
}
void showImage(unsigned char* SoA[3], CImg<unsigned char>& image)
{
// For showing the picture with Cimg
for (int x = 0; x < image._width; x++)
{
for (int y = 0; y < image._height; y++)
{
image(x, y, 0, 0) = SoA[0][matrixAt(x, y, image._width)];
image(x, y, 0, 1) = SoA[1][matrixAt(x, y, image._width)];
image(x, y, 0, 2) = SoA[2][matrixAt(x, y, image._width)];
}
}
CImgDisplay main_disp(image, "GPU - Blurred image");
while (1)
{
main_disp.wait();
}
}
int main()
{
CImg<unsigned char> image("cake.ppm"), image_cpu("cake.ppm");
const int size = image._width * image._height;
// Array of Struct
rgb* rgbStruct;
rgbStruct = new rgb[size];
// "Struct of Array"
unsigned char* SoA[3];
SoA[0] = new unsigned char[size];
SoA[1] = new unsigned char[size];
SoA[2] = new unsigned char[size];
for (int x = 0; x < image._width; x++)
{
for (int y = 0; y < image._height; y++)
{
// Put the rgb values into the rgb struct array
rgbStruct[matrixAt(x, y, image._width)].r = image(x, y, 0, 0);
rgbStruct[matrixAt(x, y, image._width)].g = image(x, y, 0, 1);
rgbStruct[matrixAt(x, y, image._width)].b = image(x, y, 0, 2);
}
}
// Declare device variables
rgb* AoS_d = nullptr;
unsigned char* SoA_d[3];
SoA_d[0] = nullptr;
SoA_d[1] = nullptr;
SoA_d[2] = nullptr;
// Allocate memory on device
cudaMalloc((void**)&AoS_d, sizeof(rgb)*size);
cudaMalloc((void**)&SoA_d[0], size);
cudaMalloc((void**)&SoA_d[1], size);
cudaMalloc((void**)&SoA_d[2], size);
// Send over the Array of Structure
cudaMemcpy(AoS_d, rgbStruct, size*sizeof(rgb), cudaMemcpyHostToDevice);
// Create a grid with correct amount of threads and blocks
dim3 numBlocks(ceil((float)image._width / (float)BLOCK_SIZEX), ceil((float)image._height / (float)BLOCK_SIZEY));
dim3 blockSize(BLOCK_SIZEX, BLOCK_SIZEY);
// Kernel call to swap array of structure to structure of arrays
dataTransformation << <numBlocks, blockSize >> > (AoS_d, SoA_d[0], image._width, image._height, 0); // R
dataTransformation << <numBlocks, blockSize >> > (AoS_d, SoA_d[1], image._width, image._height, 1); // G
dataTransformation << <numBlocks, blockSize >> > (AoS_d, SoA_d[2], image._width, image._height, 2); // B
// Send back the result to CPU
cudaMemcpy(SoA[0], SoA_d[0], size, cudaMemcpyDeviceToHost);
cudaMemcpy(SoA[1], SoA_d[1], size, cudaMemcpyDeviceToHost);
cudaMemcpy(SoA[2], SoA_d[2], size, cudaMemcpyDeviceToHost);
// Variable for blurred channel
unsigned char* SoA_blur = nullptr;
// Set up the mask
double* mask_d = nullptr;
double mask[5*5];
mask[matrixAt(0, 1, 5)] = mask[matrixAt(0, 3, 5)] = mask[matrixAt(1, 0, 5)] = mask[matrixAt(1, 4, 5)] =
mask[matrixAt(3, 0, 5)] = mask[matrixAt(3, 4, 5)] = mask[matrixAt(4, 1, 5)] = mask[matrixAt(4, 3, 5)] = 4.0 / 256.0;
mask[matrixAt(0, 0, 5)] = mask[matrixAt(0, 4, 5)] = mask[matrixAt(4, 0, 5)] = mask[matrixAt(4, 4, 5)] = 1.0 / 256.0;
mask[matrixAt(0, 2, 5)] = mask[matrixAt(2, 0, 5)] = mask[matrixAt(2, 4, 5)] = mask[matrixAt(4, 2, 5)] = 6.0 / 256.0;
mask[matrixAt(1, 1, 5)] = mask[matrixAt(1, 3, 5)] = mask[matrixAt(3, 1, 5)] = mask[matrixAt(3, 3, 5)] = 16.0 / 256.0;
mask[matrixAt(1, 2, 5)] = mask[matrixAt(2, 1, 5)] = mask[matrixAt(2, 3, 5)] = mask[matrixAt(3, 2, 5)] = 24.0 / 256.0;
mask[matrixAt(2, 2, 5)] = 36.0 / 256.0;
// Allocate memory
cudaMalloc((void**)&SoA_blur, size);
cudaMalloc((void**)&mask_d, sizeof(double) * 5 * 5);
cudaMemcpy(mask_d, mask, sizeof(double) * 5 * 5, cudaMemcpyHostToDevice);
// Kernel call to gauss blur for each channel
ImageBlur << <numBlocks, blockSize >> > (SoA_d[0], SoA_blur, mask_d, image._width, image._height); // R
cudaMemcpy(SoA[0], SoA_blur, size, cudaMemcpyDeviceToHost);
ImageBlur << <numBlocks, blockSize >> > (SoA_d[1], SoA_blur, mask_d, image._width, image._height); // G
cudaMemcpy(SoA[1], SoA_blur, size, cudaMemcpyDeviceToHost);
ImageBlur << <numBlocks, blockSize >> > (SoA_d[2], SoA_blur, mask_d, image._width, image._height); // B
cudaMemcpy(SoA[2], SoA_blur, size, cudaMemcpyDeviceToHost);
showImage(SoA, image);
/*
// Test
for (int x = 0; x < image._width; x++)
{
for (int y = 0; y < image._height; y++)
{
if (SoA[0][matrixAt(x, y, image._width)] != image(x, y, 0, 0) &&
SoA[1][matrixAt(x, y, image._width)] != image(x, y, 0, 1) &&
SoA[2][matrixAt(x, y, image._width)] != image(x, y, 0, 2))
printf("ErroR: @ (%d,%d) :: (%d, %d, %d)\n", x, y, SoA[0][matrixAt(x, y, image._width)], SoA[1][matrixAt(x, y, image._width)], SoA[2][matrixAt(x, y, image._width)]);
}
}
*/
return 0;
}
|
a854e015f1ce1a5957b44ad94d2b4535ae0e12ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Mandelbrot.h"
#include <assert.h>
#include "DomaineMath_GPU.h"
#include "IndiceTools_GPU.h"
using namespace gpu;
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void mandelbrot(uchar4* ptrDevPixels,uint w, uint h, DomaineMath domaineMath,uint n,float dt);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, float dt, uint n, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Mandelbrot_Julia_CUDA_RGBA_uchar4", domaineMath), variateurAnimation(Interval<float>(30, 100), dt)
{
this->n = n;
// Inputs
// this->dt = 0;
// Tools
this->t = 0; // protected in Animable
}
Mandelbrot::~Mandelbrot()
{
// nothing to do
}
/*-------------------------*\
|* Method *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used here because the view is not zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
//Device::lastCudaError("fractale rgba uchar4 (before)"); // facultatif, for debug only, remove for release
hipLaunchKernelGGL(( mandelbrot), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,domaineMath,n,t);
// the kernel is imported above (line 19)
//Device::lastCudaError("fractale rgba uchar4 (after)"); // optional, for debug only, remove for release
}
/**
* Override
* Called periodically by the API
*/
void Mandelbrot::animationStep()
{
// t += n;
n = variateurAnimation.varierAndGet();
}
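// Note: n is the fractal iteration depth; given the Interval<float>(30, 100) passed to
// variateurAnimation in the constructor, varierAndGet() appears to keep n varying within
// [30, 100] in steps of dt, which is what animates the rendering.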
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| a854e015f1ce1a5957b44ad94d2b4535ae0e12ef.cu | #include <iostream>
#include <assert.h>
#include "Device.h"
#include "Mandelbrot.h"
#include <assert.h>
#include "DomaineMath_GPU.h"
#include "IndiceTools_GPU.h"
using namespace gpu;
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void mandelbrot(uchar4* ptrDevPixels,uint w, uint h, DomaineMath domaineMath,uint n,float dt);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, float dt, uint n, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Mandelbrot_Julia_CUDA_RGBA_uchar4", domaineMath), variateurAnimation(Interval<float>(30, 100), dt)
{
this->n = n;
// Inputs
// this->dt = 0;
// Tools
this->t = 0; // protected in Animable
}
Mandelbrot::~Mandelbrot()
{
// nothing to do
}
/*-------------------------*\
|* Method *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used here because the view is not zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
//Device::lastCudaError("fractale rgba uchar4 (before)"); // facultatif, for debug only, remove for release
mandelbrot<<<dg,db>>>(ptrDevPixels,w,h,domaineMath,n,t);
// the kernel is imported above (line 19)
//Device::lastCudaError("fractale rgba uchar4 (after)"); // optional, for debug only, remove for release
}
/**
* Override
* Called periodically by the API
*/
void Mandelbrot::animationStep()
{
// t += n;
n = variateurAnimation.varierAndGet();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
9daac09a04b85a6f524021f796e36f32be01b475.hip | // !!! This is a file automatically generated by hipify!!!
//
// Author: Marko Atanasievski
//
// Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
// Parts of this file are originally copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
__device__ static inline signed char gpu_float2int8(float v)
{
int int32 = static_cast<int>(round(v));
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
}
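// Symmetric int8 quantization helper: round to nearest, then saturate to [-127, 127].
// Note that -128 is never produced, which keeps the representable range symmetric around zero.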
__global__ void gpu_quantize_forward(const float* a_input, const ncnn::CudaMatInfo a_info, signed char* output,
const ncnn::CudaMatInfo output_info, const float scale) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
{
return;
}
const int input_index = channel * a_info.cstep + row * a_info.w + column;
const int output_index = channel * output_info.cstep + row * output_info.w + column;
output[output_index] = gpu_float2int8(a_input[input_index] * scale);
}
namespace ncnn {
int quantize_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, float scale, std::shared_ptr<ncnn::CudaAllocator> cuda_allocator)
{
// std::shared_ptr<ncnn::CudaAllocator> cuda_allocator;
//
// if (blob_cuda_allocator.use_count() > 0)
// cuda_allocator = blob_cuda_allocator;
// else
// cuda_allocator = ncnn::get_current_gpu_allocator();
if (bottom_blob.dims == 1)
{
top_blob.create(bottom_blob.w, (size_t)1u, cuda_allocator);
if (top_blob.empty())
return -100;
}
else if (bottom_blob.dims == 2)
{
top_blob.create(bottom_blob.w, bottom_blob.h, (size_t)1u, cuda_allocator);
if (top_blob.empty())
return -100;
}
else if (bottom_blob.dims == 3)
{
top_blob.create(bottom_blob.w, bottom_blob.h, bottom_blob.c, (size_t)1u, cuda_allocator);
if (top_blob.empty())
return -100;
}
int thread_per_block_x = ((bottom_blob.w - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((bottom_blob.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = bottom_blob.c;
const int total_number_of_columns = bottom_blob.w;
const int total_number_of_rows = bottom_blob.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
const CudaMatInfo bottom_blob_info{bottom_blob};
const CudaMatInfo top_blob_info{top_blob};
hipLaunchKernelGGL(( gpu_quantize_forward), dim3(grid_size), dim3(block_size), 0, 0, static_cast<const float *>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<signed char *>(top_blob.get_raw_data()),
top_blob_info,
scale);
return 0;
}
} | 9daac09a04b85a6f524021f796e36f32be01b475.cu | //
// Author: Marko Atanasievski
//
// Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
// Parts of this file are originally copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
__device__ static inline signed char gpu_float2int8(float v)
{
int int32 = static_cast<int>(round(v));
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
}
__global__ void gpu_quantize_forward(const float* a_input, const ncnn::CudaMatInfo a_info, signed char* output,
const ncnn::CudaMatInfo output_info, const float scale) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
{
return;
}
const int input_index = channel * a_info.cstep + row * a_info.w + column;
const int output_index = channel * output_info.cstep + row * output_info.w + column;
output[output_index] = gpu_float2int8(a_input[input_index] * scale);
}
namespace ncnn {
int quantize_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, float scale, std::shared_ptr<ncnn::CudaAllocator> cuda_allocator)
{
// std::shared_ptr<ncnn::CudaAllocator> cuda_allocator;
//
// if (blob_cuda_allocator.use_count() > 0)
// cuda_allocator = blob_cuda_allocator;
// else
// cuda_allocator = ncnn::get_current_gpu_allocator();
if (bottom_blob.dims == 1)
{
top_blob.create(bottom_blob.w, (size_t)1u, cuda_allocator);
if (top_blob.empty())
return -100;
}
else if (bottom_blob.dims == 2)
{
top_blob.create(bottom_blob.w, bottom_blob.h, (size_t)1u, cuda_allocator);
if (top_blob.empty())
return -100;
}
else if (bottom_blob.dims == 3)
{
top_blob.create(bottom_blob.w, bottom_blob.h, bottom_blob.c, (size_t)1u, cuda_allocator);
if (top_blob.empty())
return -100;
}
int thread_per_block_x = ((bottom_blob.w - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((bottom_blob.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = bottom_blob.c;
const int total_number_of_columns = bottom_blob.w;
const int total_number_of_rows = bottom_blob.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
const CudaMatInfo bottom_blob_info{bottom_blob};
const CudaMatInfo top_blob_info{top_blob};
gpu_quantize_forward<<<grid_size, block_size>>>(static_cast<const float *>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<signed char *>(top_blob.get_raw_data()),
top_blob_info,
scale);
return 0;
}
} |
9340ccddd2efe899bb477c07ea8de406e8aa5d03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/sellp_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The SELL-P matrix format namespace.
*
* @ingroup sellp
*/
namespace sellp {
namespace {
constexpr auto default_block_size = 512;
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(matrix::default_slice_size) void spmv_kernel(
size_type num_rows, size_type num_right_hand_sides, size_type b_stride,
size_type c_stride, const size_type *__restrict__ slice_lengths,
const size_type *__restrict__ slice_sets, const ValueType *__restrict__ a,
const IndexType *__restrict__ col, const ValueType *__restrict__ b,
ValueType *__restrict__ c)
{
const auto slice_id = blockIdx.x;
const auto slice_size = blockDim.x;
const auto row_in_slice = threadIdx.x;
const auto global_row =
static_cast<size_type>(slice_size) * slice_id + row_in_slice;
const auto column_id = blockIdx.y;
ValueType val = 0;
IndexType ind = 0;
if (global_row < num_rows && column_id < num_right_hand_sides) {
for (size_type i = 0; i < slice_lengths[slice_id]; i++) {
ind = row_in_slice + (slice_sets[slice_id] + i) * slice_size;
val += a[ind] * b[col[ind] * b_stride + column_id];
}
c[global_row * c_stride + column_id] = val;
}
}
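// Indexing note: SELL-P stores rows in slices of matrix::default_slice_size. Within a
// slice, element i of every row is stored contiguously across the slice's rows, and
// slice_sets appears to hold the prefix-summed slice lengths (count_nnz_per_row below
// reads slice_sets[slice_id + 1] as the end offset). Hence the
// row_in_slice + (slice_sets[slice_id] + i) * slice_size index used in the loop above.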
} // namespace
template <typename ValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Sellp<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c)
{
const dim3 blockSize(matrix::default_slice_size);
const dim3 gridSize(ceildiv(a->get_size()[0], matrix::default_slice_size),
b->get_size()[1]);
hipLaunchKernelGGL(( spmv_kernel), dim3(gridSize), dim3(blockSize), 0, 0,
a->get_size()[0], b->get_size()[1], b->get_stride(), c->get_stride(),
a->get_const_slice_lengths(), a->get_const_slice_sets(),
as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
as_cuda_type(b->get_const_values()), as_cuda_type(c->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_SELLP_SPMV_KERNEL);
namespace {
template <typename ValueType, typename IndexType>
__global__
__launch_bounds__(matrix::default_slice_size) void advanced_spmv_kernel(
size_type num_rows, size_type num_right_hand_sides, size_type b_stride,
size_type c_stride, const size_type *__restrict__ slice_lengths,
const size_type *__restrict__ slice_sets,
const ValueType *__restrict__ alpha, const ValueType *__restrict__ a,
const IndexType *__restrict__ col, const ValueType *__restrict__ b,
const ValueType *__restrict__ beta, ValueType *__restrict__ c)
{
const auto slice_id = blockIdx.x;
const auto slice_size = blockDim.x;
const auto row_in_slice = threadIdx.x;
const auto global_row =
static_cast<size_type>(slice_size) * slice_id + row_in_slice;
const auto column_id = blockIdx.y;
ValueType val = 0;
IndexType ind = 0;
if (global_row < num_rows && column_id < num_right_hand_sides) {
for (size_type i = 0; i < slice_lengths[slice_id]; i++) {
ind = row_in_slice + (slice_sets[slice_id] + i) * slice_size;
val += alpha[0] * a[ind] * b[col[ind] * b_stride + column_id];
}
c[global_row * c_stride + column_id] =
beta[0] * c[global_row * c_stride + column_id] + val;
}
}
} // namespace
template <typename ValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *alpha,
const matrix::Sellp<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b,
const matrix::Dense<ValueType> *beta,
matrix::Dense<ValueType> *c)
{
const dim3 blockSize(matrix::default_slice_size);
const dim3 gridSize(ceildiv(a->get_size()[0], matrix::default_slice_size),
b->get_size()[1]);
hipLaunchKernelGGL(( advanced_spmv_kernel), dim3(gridSize), dim3(blockSize), 0, 0,
a->get_size()[0], b->get_size()[1], b->get_stride(), c->get_stride(),
a->get_const_slice_lengths(), a->get_const_slice_sets(),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
as_cuda_type(b->get_const_values()),
as_cuda_type(beta->get_const_values()), as_cuda_type(c->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_ADVANCED_SPMV_KERNEL);
namespace kernel {
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void initialize_zero_dense(
size_type num_rows, size_type num_cols, size_type stride,
ValueType *__restrict__ result)
{
const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x;
const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y;
if (tidx_x < num_cols && tidx_y < num_rows) {
result[tidx_y * stride + tidx_x] = zero<ValueType>();
}
}
template <unsigned int threads_per_row, typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_dense(
size_type num_rows, size_type num_cols, size_type stride,
size_type slice_size, const size_type *__restrict__ slice_lengths,
const size_type *__restrict__ slice_sets,
const IndexType *__restrict__ col_idxs,
const ValueType *__restrict__ values, ValueType *__restrict__ result)
{
const auto global_row =
(blockDim.x * blockIdx.x + threadIdx.x) / threads_per_row;
const auto row = global_row % slice_size;
const auto slice = global_row / slice_size;
const auto start_index = threadIdx.x % threads_per_row;
if (global_row < num_rows) {
for (auto i = start_index; i < slice_lengths[slice];
i += threads_per_row) {
if (values[(slice_sets[slice] + i) * slice_size + row] !=
zero<ValueType>()) {
result[global_row * stride +
col_idxs[(slice_sets[slice] + i) * slice_size + row]] =
values[(slice_sets[slice] + i) * slice_size + row];
}
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
matrix::Dense<ValueType> *result,
const matrix::Sellp<ValueType, IndexType> *source)
{
const auto num_rows = source->get_size()[0];
const auto num_cols = source->get_size()[1];
const auto vals = source->get_const_values();
const auto col_idxs = source->get_const_col_idxs();
const auto slice_lengths = source->get_const_slice_lengths();
const auto slice_sets = source->get_const_slice_sets();
const auto slice_size = source->get_slice_size();
const auto slice_num = ceildiv(num_rows, slice_size);
const dim3 block_size(cuda_config::warp_size,
cuda_config::max_block_size / cuda_config::warp_size,
1);
const dim3 init_grid_dim(ceildiv(result->get_stride(), block_size.x),
ceildiv(num_rows, block_size.y), 1);
hipLaunchKernelGGL(( kernel::initialize_zero_dense), dim3(init_grid_dim), dim3(block_size), 0, 0,
num_rows, num_cols, result->get_stride(),
as_cuda_type(result->get_values()));
constexpr auto threads_per_row = cuda_config::warp_size;
const auto grid_dim =
ceildiv(slice_size * slice_num * threads_per_row, default_block_size);
hipLaunchKernelGGL(( kernel::fill_in_dense<threads_per_row>), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, num_cols, result->get_stride(), slice_size,
as_cuda_type(slice_lengths), as_cuda_type(slice_sets),
as_cuda_type(col_idxs), as_cuda_type(vals),
as_cuda_type(result->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_CONVERT_TO_DENSE_KERNEL);
namespace kernel {
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void count_nnz_per_row(
size_type num_rows, size_type slice_size,
const size_type *__restrict__ slice_sets,
const ValueType *__restrict__ values, IndexType *__restrict__ result)
{
constexpr auto warp_size = cuda_config::warp_size;
const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto row_idx = tidx / warp_size;
const auto slice_id = row_idx / slice_size;
const auto tid_in_warp = tidx % warp_size;
const auto row_in_slice = row_idx % slice_size;
if (row_idx < num_rows) {
IndexType part_result{};
for (size_type sellp_ind =
(slice_sets[slice_id] + tid_in_warp) * slice_size +
row_in_slice;
sellp_ind < slice_sets[slice_id + 1] * slice_size;
sellp_ind += warp_size * slice_size) {
if (values[sellp_ind] != zero<ValueType>()) {
part_result += 1;
}
}
auto warp_tile =
group::tiled_partition<warp_size>(group::this_thread_block());
result[row_idx] = reduce(
warp_tile, part_result,
[](const size_type &a, const size_type &b) { return a + b; });
}
}
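// One warp per row: each lane strides over the row's slice storage counting nonzeros,
// and the per-lane partial counts are combined with a warp-tile reduction before being
// stored into result[row_idx] (the row's nonzero count, later prefix-summed into row_ptrs).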
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_csr(
size_type num_rows, size_type slice_size,
const size_type *__restrict__ source_slice_sets,
const IndexType *__restrict__ source_col_idxs,
const ValueType *__restrict__ source_values,
IndexType *__restrict__ result_row_ptrs,
IndexType *__restrict__ result_col_idxs,
ValueType *__restrict__ result_values)
{
const auto row = threadIdx.x + blockIdx.x * blockDim.x;
const auto slice_id = row / slice_size;
const auto row_in_slice = row % slice_size;
if (row < num_rows) {
size_type csr_ind = result_row_ptrs[row];
for (size_type sellp_ind =
source_slice_sets[slice_id] * slice_size + row_in_slice;
sellp_ind < source_slice_sets[slice_id + 1] * slice_size;
sellp_ind += slice_size) {
if (source_values[sellp_ind] != zero<ValueType>()) {
result_values[csr_ind] = source_values[sellp_ind];
result_col_idxs[csr_ind] = source_col_idxs[sellp_ind];
csr_ind++;
}
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
matrix::Csr<ValueType, IndexType> *result,
const matrix::Sellp<ValueType, IndexType> *source)
{
const auto num_rows = source->get_size()[0];
const auto slice_size = source->get_slice_size();
const auto slice_num = ceildiv(num_rows, slice_size);
const auto source_values = source->get_const_values();
const auto source_slice_lengths = source->get_const_slice_lengths();
const auto source_slice_sets = source->get_const_slice_sets();
const auto source_col_idxs = source->get_const_col_idxs();
auto result_values = result->get_values();
auto result_col_idxs = result->get_col_idxs();
auto result_row_ptrs = result->get_row_ptrs();
auto grid_dim =
ceildiv(num_rows * cuda_config::warp_size, default_block_size);
hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, slice_size, as_cuda_type(source_slice_sets),
as_cuda_type(source_values), as_cuda_type(result_row_ptrs));
grid_dim = ceildiv(num_rows + 1, default_block_size);
auto add_values = Array<IndexType>(exec, grid_dim);
hipLaunchKernelGGL(( start_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows + 1, as_cuda_type(result_row_ptrs),
as_cuda_type(add_values.get_data()));
hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows + 1, as_cuda_type(result_row_ptrs),
as_cuda_type(add_values.get_const_data()));
grid_dim = ceildiv(num_rows, default_block_size);
hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, slice_size, as_cuda_type(source_slice_sets),
as_cuda_type(source_col_idxs), as_cuda_type(source_values),
as_cuda_type(result_row_ptrs), as_cuda_type(result_col_idxs),
as_cuda_type(result_values));
add_values.clear();
}
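// Conversion pattern used above: (1) count_nnz_per_row writes each row's nonzero count
// into result_row_ptrs, (2) start_prefix_sum/finalize_prefix_sum turn those counts into
// CSR row offsets, and (3) fill_in_csr scatters the nonzero values and column indices
// into their final positions.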
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_CONVERT_TO_CSR_KERNEL);
namespace kernel {
__global__ __launch_bounds__(default_block_size) void reduce_nnz(
size_type size, const size_type *__restrict__ nnz_per_row,
size_type *__restrict__ result)
{
extern __shared__ size_type block_sum[];
reduce_array(size, nnz_per_row, block_sum,
[](const size_type &x, const size_type &y) { return x + y; });
if (threadIdx.x == 0) {
result[blockIdx.x] = block_sum[0];
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
const matrix::Sellp<ValueType, IndexType> *source,
size_type *result)
{
const auto num_rows = source->get_size()[0];
const auto slice_size = source->get_slice_size();
const auto slice_sets = source->get_const_slice_sets();
const auto values = source->get_const_values();
auto nnz_per_row = Array<size_type>(exec, num_rows);
auto grid_dim =
ceildiv(num_rows * cuda_config::warp_size, default_block_size);
hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, slice_size, as_cuda_type(slice_sets), as_cuda_type(values),
as_cuda_type(nnz_per_row.get_data()));
const auto n = ceildiv(num_rows, default_block_size);
grid_dim = (n <= default_block_size) ? n : default_block_size;
auto block_results = Array<size_type>(exec, grid_dim);
hipLaunchKernelGGL(( kernel::reduce_nnz), dim3(grid_dim), dim3(default_block_size),
default_block_size * sizeof(size_type), 0,
num_rows, as_cuda_type(nnz_per_row.get_const_data()),
as_cuda_type(block_results.get_data()));
auto d_result = Array<size_type>(exec, 1);
hipLaunchKernelGGL(( kernel::reduce_nnz), dim3(1), dim3(default_block_size),
default_block_size * sizeof(size_type), 0,
grid_dim, as_cuda_type(block_results.get_const_data()),
as_cuda_type(d_result.get_data()));
exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
result);
d_result.clear();
block_results.clear();
nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_COUNT_NONZEROS_KERNEL);
} // namespace sellp
} // namespace cuda
} // namespace kernels
} // namespace gko
| 9340ccddd2efe899bb477c07ea8de406e8aa5d03.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/sellp_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The SELL-P matrix format namespace.
*
* @ingroup sellp
*/
namespace sellp {
namespace {
constexpr auto default_block_size = 512;
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(matrix::default_slice_size) void spmv_kernel(
size_type num_rows, size_type num_right_hand_sides, size_type b_stride,
size_type c_stride, const size_type *__restrict__ slice_lengths,
const size_type *__restrict__ slice_sets, const ValueType *__restrict__ a,
const IndexType *__restrict__ col, const ValueType *__restrict__ b,
ValueType *__restrict__ c)
{
const auto slice_id = blockIdx.x;
const auto slice_size = blockDim.x;
const auto row_in_slice = threadIdx.x;
const auto global_row =
static_cast<size_type>(slice_size) * slice_id + row_in_slice;
const auto column_id = blockIdx.y;
ValueType val = 0;
IndexType ind = 0;
if (global_row < num_rows && column_id < num_right_hand_sides) {
for (size_type i = 0; i < slice_lengths[slice_id]; i++) {
ind = row_in_slice + (slice_sets[slice_id] + i) * slice_size;
val += a[ind] * b[col[ind] * b_stride + column_id];
}
c[global_row * c_stride + column_id] = val;
}
}
} // namespace
template <typename ValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Sellp<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c)
{
const dim3 blockSize(matrix::default_slice_size);
const dim3 gridSize(ceildiv(a->get_size()[0], matrix::default_slice_size),
b->get_size()[1]);
spmv_kernel<<<gridSize, blockSize>>>(
a->get_size()[0], b->get_size()[1], b->get_stride(), c->get_stride(),
a->get_const_slice_lengths(), a->get_const_slice_sets(),
as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
as_cuda_type(b->get_const_values()), as_cuda_type(c->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_SELLP_SPMV_KERNEL);
namespace {
template <typename ValueType, typename IndexType>
__global__
__launch_bounds__(matrix::default_slice_size) void advanced_spmv_kernel(
size_type num_rows, size_type num_right_hand_sides, size_type b_stride,
size_type c_stride, const size_type *__restrict__ slice_lengths,
const size_type *__restrict__ slice_sets,
const ValueType *__restrict__ alpha, const ValueType *__restrict__ a,
const IndexType *__restrict__ col, const ValueType *__restrict__ b,
const ValueType *__restrict__ beta, ValueType *__restrict__ c)
{
const auto slice_id = blockIdx.x;
const auto slice_size = blockDim.x;
const auto row_in_slice = threadIdx.x;
const auto global_row =
static_cast<size_type>(slice_size) * slice_id + row_in_slice;
const auto column_id = blockIdx.y;
ValueType val = 0;
IndexType ind = 0;
if (global_row < num_rows && column_id < num_right_hand_sides) {
for (size_type i = 0; i < slice_lengths[slice_id]; i++) {
ind = row_in_slice + (slice_sets[slice_id] + i) * slice_size;
val += alpha[0] * a[ind] * b[col[ind] * b_stride + column_id];
}
c[global_row * c_stride + column_id] =
beta[0] * c[global_row * c_stride + column_id] + val;
}
}
} // namespace
template <typename ValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *alpha,
const matrix::Sellp<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b,
const matrix::Dense<ValueType> *beta,
matrix::Dense<ValueType> *c)
{
const dim3 blockSize(matrix::default_slice_size);
const dim3 gridSize(ceildiv(a->get_size()[0], matrix::default_slice_size),
b->get_size()[1]);
advanced_spmv_kernel<<<gridSize, blockSize>>>(
a->get_size()[0], b->get_size()[1], b->get_stride(), c->get_stride(),
a->get_const_slice_lengths(), a->get_const_slice_sets(),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
as_cuda_type(b->get_const_values()),
as_cuda_type(beta->get_const_values()), as_cuda_type(c->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_ADVANCED_SPMV_KERNEL);
namespace kernel {
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void initialize_zero_dense(
size_type num_rows, size_type num_cols, size_type stride,
ValueType *__restrict__ result)
{
const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x;
const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y;
if (tidx_x < num_cols && tidx_y < num_rows) {
result[tidx_y * stride + tidx_x] = zero<ValueType>();
}
}
template <unsigned int threads_per_row, typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_dense(
size_type num_rows, size_type num_cols, size_type stride,
size_type slice_size, const size_type *__restrict__ slice_lengths,
const size_type *__restrict__ slice_sets,
const IndexType *__restrict__ col_idxs,
const ValueType *__restrict__ values, ValueType *__restrict__ result)
{
const auto global_row =
(blockDim.x * blockIdx.x + threadIdx.x) / threads_per_row;
const auto row = global_row % slice_size;
const auto slice = global_row / slice_size;
const auto start_index = threadIdx.x % threads_per_row;
if (global_row < num_rows) {
for (auto i = start_index; i < slice_lengths[slice];
i += threads_per_row) {
if (values[(slice_sets[slice] + i) * slice_size + row] !=
zero<ValueType>()) {
result[global_row * stride +
col_idxs[(slice_sets[slice] + i) * slice_size + row]] =
values[(slice_sets[slice] + i) * slice_size + row];
}
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
matrix::Dense<ValueType> *result,
const matrix::Sellp<ValueType, IndexType> *source)
{
const auto num_rows = source->get_size()[0];
const auto num_cols = source->get_size()[1];
const auto vals = source->get_const_values();
const auto col_idxs = source->get_const_col_idxs();
const auto slice_lengths = source->get_const_slice_lengths();
const auto slice_sets = source->get_const_slice_sets();
const auto slice_size = source->get_slice_size();
const auto slice_num = ceildiv(num_rows, slice_size);
const dim3 block_size(cuda_config::warp_size,
cuda_config::max_block_size / cuda_config::warp_size,
1);
const dim3 init_grid_dim(ceildiv(result->get_stride(), block_size.x),
ceildiv(num_rows, block_size.y), 1);
kernel::initialize_zero_dense<<<init_grid_dim, block_size>>>(
num_rows, num_cols, result->get_stride(),
as_cuda_type(result->get_values()));
constexpr auto threads_per_row = cuda_config::warp_size;
const auto grid_dim =
ceildiv(slice_size * slice_num * threads_per_row, default_block_size);
kernel::fill_in_dense<threads_per_row><<<grid_dim, default_block_size>>>(
num_rows, num_cols, result->get_stride(), slice_size,
as_cuda_type(slice_lengths), as_cuda_type(slice_sets),
as_cuda_type(col_idxs), as_cuda_type(vals),
as_cuda_type(result->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_CONVERT_TO_DENSE_KERNEL);
namespace kernel {
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void count_nnz_per_row(
size_type num_rows, size_type slice_size,
const size_type *__restrict__ slice_sets,
const ValueType *__restrict__ values, IndexType *__restrict__ result)
{
constexpr auto warp_size = cuda_config::warp_size;
const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto row_idx = tidx / warp_size;
const auto slice_id = row_idx / slice_size;
const auto tid_in_warp = tidx % warp_size;
const auto row_in_slice = row_idx % slice_size;
if (row_idx < num_rows) {
IndexType part_result{};
for (size_type sellp_ind =
(slice_sets[slice_id] + tid_in_warp) * slice_size +
row_in_slice;
sellp_ind < slice_sets[slice_id + 1] * slice_size;
sellp_ind += warp_size * slice_size) {
if (values[sellp_ind] != zero<ValueType>()) {
part_result += 1;
}
}
auto warp_tile =
group::tiled_partition<warp_size>(group::this_thread_block());
result[row_idx] = reduce(
warp_tile, part_result,
[](const size_type &a, const size_type &b) { return a + b; });
}
}
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_csr(
size_type num_rows, size_type slice_size,
const size_type *__restrict__ source_slice_sets,
const IndexType *__restrict__ source_col_idxs,
const ValueType *__restrict__ source_values,
IndexType *__restrict__ result_row_ptrs,
IndexType *__restrict__ result_col_idxs,
ValueType *__restrict__ result_values)
{
const auto row = threadIdx.x + blockIdx.x * blockDim.x;
const auto slice_id = row / slice_size;
const auto row_in_slice = row % slice_size;
if (row < num_rows) {
size_type csr_ind = result_row_ptrs[row];
for (size_type sellp_ind =
source_slice_sets[slice_id] * slice_size + row_in_slice;
sellp_ind < source_slice_sets[slice_id + 1] * slice_size;
sellp_ind += slice_size) {
if (source_values[sellp_ind] != zero<ValueType>()) {
result_values[csr_ind] = source_values[sellp_ind];
result_col_idxs[csr_ind] = source_col_idxs[sellp_ind];
csr_ind++;
}
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
matrix::Csr<ValueType, IndexType> *result,
const matrix::Sellp<ValueType, IndexType> *source)
{
const auto num_rows = source->get_size()[0];
const auto slice_size = source->get_slice_size();
const auto slice_num = ceildiv(num_rows, slice_size);
const auto source_values = source->get_const_values();
const auto source_slice_lengths = source->get_const_slice_lengths();
const auto source_slice_sets = source->get_const_slice_sets();
const auto source_col_idxs = source->get_const_col_idxs();
auto result_values = result->get_values();
auto result_col_idxs = result->get_col_idxs();
auto result_row_ptrs = result->get_row_ptrs();
auto grid_dim =
ceildiv(num_rows * cuda_config::warp_size, default_block_size);
kernel::count_nnz_per_row<<<grid_dim, default_block_size>>>(
num_rows, slice_size, as_cuda_type(source_slice_sets),
as_cuda_type(source_values), as_cuda_type(result_row_ptrs));
grid_dim = ceildiv(num_rows + 1, default_block_size);
auto add_values = Array<IndexType>(exec, grid_dim);
start_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
num_rows + 1, as_cuda_type(result_row_ptrs),
as_cuda_type(add_values.get_data()));
finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
num_rows + 1, as_cuda_type(result_row_ptrs),
as_cuda_type(add_values.get_const_data()));
grid_dim = ceildiv(num_rows, default_block_size);
kernel::fill_in_csr<<<grid_dim, default_block_size>>>(
num_rows, slice_size, as_cuda_type(source_slice_sets),
as_cuda_type(source_col_idxs), as_cuda_type(source_values),
as_cuda_type(result_row_ptrs), as_cuda_type(result_col_idxs),
as_cuda_type(result_values));
add_values.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_CONVERT_TO_CSR_KERNEL);
namespace kernel {
__global__ __launch_bounds__(default_block_size) void reduce_nnz(
size_type size, const size_type *__restrict__ nnz_per_row,
size_type *__restrict__ result)
{
extern __shared__ size_type block_sum[];
reduce_array(size, nnz_per_row, block_sum,
[](const size_type &x, const size_type &y) { return x + y; });
if (threadIdx.x == 0) {
result[blockIdx.x] = block_sum[0];
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
const matrix::Sellp<ValueType, IndexType> *source,
size_type *result)
{
const auto num_rows = source->get_size()[0];
const auto slice_size = source->get_slice_size();
const auto slice_sets = source->get_const_slice_sets();
const auto values = source->get_const_values();
auto nnz_per_row = Array<size_type>(exec, num_rows);
auto grid_dim =
ceildiv(num_rows * cuda_config::warp_size, default_block_size);
kernel::count_nnz_per_row<<<grid_dim, default_block_size>>>(
num_rows, slice_size, as_cuda_type(slice_sets), as_cuda_type(values),
as_cuda_type(nnz_per_row.get_data()));
const auto n = ceildiv(num_rows, default_block_size);
grid_dim = (n <= default_block_size) ? n : default_block_size;
auto block_results = Array<size_type>(exec, grid_dim);
kernel::reduce_nnz<<<grid_dim, default_block_size,
default_block_size * sizeof(size_type)>>>(
num_rows, as_cuda_type(nnz_per_row.get_const_data()),
as_cuda_type(block_results.get_data()));
auto d_result = Array<size_type>(exec, 1);
kernel::reduce_nnz<<<1, default_block_size,
default_block_size * sizeof(size_type)>>>(
grid_dim, as_cuda_type(block_results.get_const_data()),
as_cuda_type(d_result.get_data()));
exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
result);
d_result.clear();
block_results.clear();
nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_SELLP_COUNT_NONZEROS_KERNEL);
} // namespace sellp
} // namespace cuda
} // namespace kernels
} // namespace gko
|
0ca73f3e3958a8a33160bbd12a9ed8a210b6dd3e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "softmax_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "../neural_network_exception.h"
#include "util_cuda.h"
__global__ void softmax_upd_kernel(
const float * __restrict input,
float * __restrict output,
int feature_map_count,
int neuron_count_per_feature_map,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
if ((neuron_id < neuron_count_per_feature_map) && (entry_id < entry_count))
{
int initial_offset = entry_id * feature_map_count * neuron_count_per_feature_map + neuron_id;
float sum = 0.0F;
const float * current_input = input + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
sum += __expf(*current_input);
current_input += neuron_count_per_feature_map;
}
float mult = __fdividef(1.0F, sum);
current_input = input + initial_offset;
float * current_output = output + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
float val = __expf(*current_input);
*current_output = val * mult;
current_input += neuron_count_per_feature_map;
current_output += neuron_count_per_feature_map;
}
}
}
__global__ void softmax_deriviative_upd_kernel(
float * __restrict errors,
const float * __restrict output_neurons,
int feature_map_count,
int neuron_count_per_feature_map,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
if ((neuron_id < neuron_count_per_feature_map) && (entry_id < entry_count))
{
int initial_offset = entry_id * feature_map_count * neuron_count_per_feature_map + neuron_id;
float sum = 0.0F;
const float * current_output_neurons = output_neurons + initial_offset;
const float * current_output_errors = errors + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
sum += __load_nc(current_output_neurons) * __load_nc(current_output_errors);
current_output_neurons += neuron_count_per_feature_map;
current_output_errors += neuron_count_per_feature_map;
}
current_output_neurons = output_neurons + initial_offset;
float * current_errors = errors + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
*current_errors = __load_nc(current_output_neurons) * (__load_nc(current_errors) - sum);
current_output_neurons += neuron_count_per_feature_map;
current_errors += neuron_count_per_feature_map;
}
}
}
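// For reference, the math implemented by the two kernels above (read directly
// from the code): the forward pass computes y_i = exp(x_i) / sum_j exp(x_j)
// across the feature maps of each neuron, and the backward pass overwrites the
// error in place with e_i <- y_i * (e_i - sum_j y_j * e_j), i.e. it applies the
// softmax Jacobian to the incoming error, consistent with
// is_in_place_backprop() returning true below.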
namespace nnforge
{
namespace cuda
{
softmax_layer_updater_cuda::softmax_layer_updater_cuda()
{
}
softmax_layer_updater_cuda::~softmax_layer_updater_cuda()
{
}
void softmax_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
entry_count,
1);
hipLaunchKernelGGL(( softmax_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_neurons_buffer,
*output_neurons_buffer,
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
entry_count);
}
void softmax_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
entry_count,
1);
hipLaunchKernelGGL(( softmax_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_errors_buffer,
*output_neurons_buffer,
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
entry_count);
}
bool softmax_layer_updater_cuda::is_in_place_backprop() const
{
return true;
}
void softmax_layer_updater_cuda::updater_configured()
{
if (!different_input)
throw neural_network_exception("softmax_layer_updater_cuda is not able to run using the same input");
}
}
}
| 0ca73f3e3958a8a33160bbd12a9ed8a210b6dd3e.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "softmax_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "../neural_network_exception.h"
#include "util_cuda.h"
__global__ void softmax_upd_kernel(
const float * __restrict input,
float * __restrict output,
int feature_map_count,
int neuron_count_per_feature_map,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
if ((neuron_id < neuron_count_per_feature_map) && (entry_id < entry_count))
{
int initial_offset = entry_id * feature_map_count * neuron_count_per_feature_map + neuron_id;
float sum = 0.0F;
const float * current_input = input + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
sum += __expf(*current_input);
current_input += neuron_count_per_feature_map;
}
float mult = __fdividef(1.0F, sum);
current_input = input + initial_offset;
float * current_output = output + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
float val = __expf(*current_input);
*current_output = val * mult;
current_input += neuron_count_per_feature_map;
current_output += neuron_count_per_feature_map;
}
}
}
__global__ void softmax_deriviative_upd_kernel(
float * __restrict errors,
const float * __restrict output_neurons,
int feature_map_count,
int neuron_count_per_feature_map,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
if ((neuron_id < neuron_count_per_feature_map) && (entry_id < entry_count))
{
int initial_offset = entry_id * feature_map_count * neuron_count_per_feature_map + neuron_id;
float sum = 0.0F;
const float * current_output_neurons = output_neurons + initial_offset;
const float * current_output_errors = errors + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
sum += __load_nc(current_output_neurons) * __load_nc(current_output_errors);
current_output_neurons += neuron_count_per_feature_map;
current_output_errors += neuron_count_per_feature_map;
}
current_output_neurons = output_neurons + initial_offset;
float * current_errors = errors + initial_offset;
for(int i = 0; i < feature_map_count; ++i)
{
*current_errors = __load_nc(current_output_neurons) * (__load_nc(current_errors) - sum);
current_output_neurons += neuron_count_per_feature_map;
current_errors += neuron_count_per_feature_map;
}
}
}
namespace nnforge
{
namespace cuda
{
softmax_layer_updater_cuda::softmax_layer_updater_cuda()
{
}
softmax_layer_updater_cuda::~softmax_layer_updater_cuda()
{
}
void softmax_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
entry_count,
1);
softmax_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*output_neurons_buffer,
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
entry_count);
}
void softmax_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
entry_count,
1);
softmax_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_errors_buffer,
*output_neurons_buffer,
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
entry_count);
}
bool softmax_layer_updater_cuda::is_in_place_backprop() const
{
return true;
}
void softmax_layer_updater_cuda::updater_configured()
{
if (!different_input)
throw neural_network_exception("softmax_layer_updater_cuda is not able to run using the same input");
}
}
}
|
a289327a76b519c20fd425ac53b64ded1882034d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <hipcub/hipcub.hpp>
#include <typeinfo>
#include <multigrid_helper.cuh>
// enabling CTA swizzling improves spatial locality of MG blocks reducing cache line wastage
#define SWIZZLE
namespace quda {
#ifdef GPU_MULTIGRID
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
template <typename Out, typename In, typename Rotator, int fineSpin, int coarseSpin>
struct RestrictArg {
Out out;
const In in;
const Rotator V;
const int *fine_to_coarse;
const int *coarse_to_fine;
const spin_mapper<fineSpin,coarseSpin> spin_map;
const int parity; // the parity of the input field (if single parity)
const int nParity; // number of parities of input fine field
int swizzle; // swizzle factor for transposing blockIdx.x mapping to coarse grid coordinate
RestrictArg(Out &out, const In &in, const Rotator &V,
const int *fine_to_coarse, const int *coarse_to_fine,
int parity, const ColorSpinorField &meta) :
out(out), in(in), V(V), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine),
spin_map(), parity(parity), nParity(meta.SiteSubset()), swizzle(1)
{ }
RestrictArg(const RestrictArg<Out,In,Rotator,fineSpin,coarseSpin> &arg) :
out(arg.out), in(arg.in), V(arg.V),
fine_to_coarse(arg.fine_to_coarse), coarse_to_fine(arg.coarse_to_fine), spin_map(),
parity(arg.parity), nParity(arg.nParity), swizzle(arg.swizzle)
{ }
};
/**
Rotates from the fine-color basis into the coarse-color basis.
*/
template <typename Float, int fineSpin, int fineColor, int coarseColor, int coarse_colors_per_thread,
class FineColor, class Rotator>
__device__ __host__ inline void rotateCoarseColor(complex<Float> out[fineSpin*coarse_colors_per_thread],
const FineColor &in, const Rotator &V,
int parity, int nParity, int x_cb, int coarse_color_block) {
const int spinor_parity = (nParity == 2) ? parity : 0;
const int v_parity = (V.Nparity() == 2) ? parity : 0;
#pragma unroll
for (int s=0; s<fineSpin; s++)
#pragma unroll
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
out[s*coarse_colors_per_thread+coarse_color_local] = 0.0;
}
#pragma unroll
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int i = coarse_color_block + coarse_color_local;
#pragma unroll
for (int s=0; s<fineSpin; s++) {
constexpr int color_unroll = fineColor == 3 ? 3 : 2;
complex<Float> partial[color_unroll];
#pragma unroll
for (int k=0; k<color_unroll; k++) partial[k] = 0.0;
#pragma unroll
for (int j=0; j<fineColor; j+=color_unroll) {
#pragma unroll
for (int k=0; k<color_unroll; k++)
partial[k] += conj(V(v_parity, x_cb, s, j+k, i)) * in(spinor_parity, x_cb, s, j+k);
}
#pragma unroll
for (int k=0; k<color_unroll; k++) out[s*coarse_colors_per_thread + coarse_color_local] += partial[k];
}
}
}
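// Written out, the loop nest above computes, for every fine spin s and every
// coarse color i in this thread's block of colors,
// out[s][i] = sum_j conj(V(parity, x_cb, s, j, i)) * in(parity, x_cb, s, j),
// accumulating color_unroll partial sums at a time (presumably to expose more
// instruction-level parallelism).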
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread, typename Arg>
void Restrict(Arg arg) {
for (int parity_coarse=0; parity_coarse<2; parity_coarse++)
for (int x_coarse_cb=0; x_coarse_cb<arg.out.VolumeCB(); x_coarse_cb++)
for (int s=0; s<coarseSpin; s++)
for (int c=0; c<coarseColor; c++)
arg.out(parity_coarse, x_coarse_cb, s, c) = 0.0;
// loop over fine degrees of freedom
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb=0; x_cb<arg.in.VolumeCB(); x_cb++) {
int x = parity*arg.in.VolumeCB() + x_cb;
int x_coarse = arg.fine_to_coarse[x];
int parity_coarse = (x_coarse >= arg.out.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
for (int coarse_color_block=0; coarse_color_block<coarseColor; coarse_color_block+=coarse_colors_per_thread) {
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>
(tmp, arg.in, arg.V, parity, arg.nParity, x_cb, coarse_color_block);
for (int s=0; s<fineSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int c = coarse_color_block + coarse_color_local;
arg.out(parity_coarse,x_coarse_cb,arg.spin_map(s),c) += tmp[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
}
}
/**
struct which acts as a wrapper to a vector of data.
*/
template <typename scalar, int n>
struct vector_type {
scalar data[n];
__device__ __host__ inline scalar& operator[](int i) { return data[i]; }
__device__ __host__ inline const scalar& operator[](int i) const { return data[i]; }
__device__ __host__ inline static constexpr int size() { return n; }
__device__ __host__ vector_type() { for (int i=0; i<n; i++) data[i] = 0.0; }
};
/**
functor that defines how to do a multi-vector reduction
*/
template <typename T>
struct reduce {
__device__ __host__ inline T operator()(const T &a, const T &b) {
T sum;
for (int i=0; i<sum.size(); i++) sum[i] = a[i] + b[i];
return sum;
}
};
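// vector_type and reduce bundle all coarseSpin*coarse_colors_per_thread complex
// components into a single value so that the block-wide reduction below can be
// done with one (hip)cub BlockReduce call, with reduce<> supplying the
// element-wise sum (this is an inference from how they are used in
// RestrictKernel).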
/**
Here, we ensure that each thread block maps exactly to a
geometric block. Each thread block corresponds to one geometric
block, with number of threads equal to the number of fine grid
points per aggregate, so each thread represents a fine-grid
point. The look-up table coarse_to_fine maps each thread in the
block to its fine grid point.
*/
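/**
A concrete example of the block-size computation used below (see the
constructor of RestrictLaunch): a 4x4x4x4 aggregate has 256 fine sites, i.e.
128 per checkerboard parity, so block_size = in.VolumeCB()/(2*out.VolumeCB())
= 128 and the kernel is launched with blockDim = (128, nParity, 1), matching
the block_size == 128 branch of the dispatch.
*/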
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread,
typename Arg, int block_size>
__global__ void RestrictKernel(Arg arg) {
#ifdef SWIZZLE
// the portion of the grid that is exactly divisible by the number of SMs
const int gridp = gridDim.x - gridDim.x % arg.swizzle;
int x_coarse = blockIdx.x;
if (blockIdx.x < gridp) {
// this is the portion of the block that we are going to transpose
const int i = blockIdx.x % arg.swizzle;
const int j = blockIdx.x / arg.swizzle;
// transpose the coordinates
x_coarse = i * (gridp / arg.swizzle) + j;
}
#else
int x_coarse = blockIdx.x;
#endif
int parity_coarse = x_coarse >= arg.out.VolumeCB() ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
// obtain fine index from this look up table
// since both parities map to the same block, each thread block must do both parities
// threadIdx.x - fine checkerboard offset
// threadIdx.y - fine parity offset
// blockIdx.x - which coarse block are we working on (swizzled to improve cache efficiency)
// assume that coarse_to_fine look up map is ordered as (coarse-block-id + fine-point-id)
// and that fine-point-id is parity ordered
int parity = arg.nParity == 2 ? threadIdx.y : arg.parity;
int x_fine = arg.coarse_to_fine[ (x_coarse*2 + parity) * blockDim.x + threadIdx.x];
int x_fine_cb = x_fine - parity*arg.in.VolumeCB();
int coarse_color_block = (blockDim.z*blockIdx.z + threadIdx.z) * coarse_colors_per_thread;
if (coarse_color_block >= coarseColor) return;
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>
(tmp, arg.in, arg.V, parity, arg.nParity, x_fine_cb, coarse_color_block);
typedef vector_type<complex<Float>, coarseSpin*coarse_colors_per_thread> vector;
vector reduced;
// first lets coarsen spin locally
for (int s=0; s<fineSpin; s++) {
for (int v=0; v<coarse_colors_per_thread; v++) {
reduced[arg.spin_map(s)*coarse_colors_per_thread+v] += tmp[s*coarse_colors_per_thread+v];
}
}
// now lets coarsen geometry across threads
if (arg.nParity == 2) {
typedef hipcub::BlockReduce<vector, block_size, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 2> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
reduce<vector> reducer; // reduce functor
// note this is not safe for blockDim.z > 1
reduced = BlockReduce(temp_storage).Reduce(reduced, reducer);
} else {
typedef hipcub::BlockReduce<vector, block_size, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
reduce<vector> reducer; // reduce functor
// note this is not safe for blockDim.z > 1
reduced = BlockReduce(temp_storage).Reduce(reduced, reducer);
}
if (threadIdx.x==0 && threadIdx.y == 0) {
for (int s=0; s<coarseSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int v = coarse_color_block + coarse_color_local;
arg.out(parity_coarse, x_coarse_cb, s, v) = reduced[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
template <typename Float, typename Arg, int fineSpin, int fineColor, int coarseSpin, int coarseColor,
int coarse_colors_per_thread>
class RestrictLaunch : public Tunable {
protected:
Arg &arg;
QudaFieldLocation location;
const int block_size;
char vol[TuneKey::volume_n];
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.in.VolumeCB(); } // fine parity is the block y dimension
public:
RestrictLaunch(Arg &arg, const ColorSpinorField &coarse, const ColorSpinorField &fine,
const QudaFieldLocation location)
: arg(arg), location(location), block_size((arg.in.VolumeCB())/(2*arg.out.VolumeCB())) {
strcpy(vol, coarse.VolString());
strcat(vol, ",");
strcat(vol, fine.VolString());
strcpy(aux, coarse.AuxString());
strcat(aux, ",");
strcat(aux, fine.AuxString());
} // block size is checkerboard fine length / full coarse length
virtual ~RestrictLaunch() { }
void apply(const hipStream_t &stream) {
if (location == QUDA_CPU_FIELD_LOCATION) {
Restrict<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
arg.swizzle = tp.aux.x;
if (block_size == 8) { // for 2x2x2x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,8>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 16) { // for 4x2x2x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,16>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 27) { // for 3x3x3x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,27>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 32) { // for 4x4x2x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,32>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 36) { // for 3x3x4x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,36>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 40) { // for 5x4x2x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,40>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 54) { // for 3x3x3x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,54>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 64) { // for 4x4x2x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,64>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 100) { // for 5x5x2x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,100>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 108) { // for 6x3x3x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,108>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 128) { // for 4x4x4x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,128>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 200) { // for 5x5x2x8 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,200>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 216) { // for 6x6x6x2 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,216>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 256) { // for 4x4x4x8 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,256>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
#if __COMPUTE_CAPABILITY__ >= 300
} else if (block_size == 432) { // for 6x6x6x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,432>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 500) { // 5x5x5x8 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,500>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 512) { // 8x8x4x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,512>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 648) { // 6x6x6x6 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,648>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 768) { // 8x8x6x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,768>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 1024) { // 8x8x8x4 aggregates
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,1024>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
#endif
} else {
errorQuda("Block size %d not instantiated", block_size);
}
}
}
// This block tuning tunes for the optimal amount of color
// splitting between blockDim.z and gridDim.z. However, enabling
// blockDim.z > 1 gives incorrect results due to cub reductions
// being unable to do independent sliced reductions along
// blockDim.z. So for now we only split between colors per thread
// and grid.z.
bool advanceBlockDim(TuneParam &param) const
{
// let's try to advance spin/block-color
while(param.block.z <= coarseColor/coarse_colors_per_thread) {
param.block.z++;
if ( (coarseColor/coarse_colors_per_thread) % param.block.z == 0) {
param.grid.z = (coarseColor/coarse_colors_per_thread) / param.block.z;
break;
}
}
// we can advance spin/block-color since this is valid
if (param.block.z <= (coarseColor/coarse_colors_per_thread) ) { //
return true;
} else { // we have run off the end so let's reset
param.block.z = 1;
param.grid.z = coarseColor/coarse_colors_per_thread;
return false;
}
}
int tuningIter() const { return 3; }
bool advanceAux(TuneParam &param) const
{
#ifdef SWIZZLE
if (param.aux.x < 2*deviceProp.multiProcessorCount) {
param.aux.x++;
return true;
} else {
param.aux.x = 1;
return false;
}
#else
return false;
#endif
}
// only tune shared memory per thread (disable tuning for block.z for now)
bool advanceTuneParam(TuneParam &param) const { return advanceSharedBytes(param) || advanceAux(param); }
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
void initTuneParam(TuneParam &param) const { defaultTuneParam(param); }
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const {
param.block = dim3(block_size, arg.nParity, 1);
param.grid = dim3( (minThreads()+param.block.x-1) / param.block.x, 1, 1);
param.shared_bytes = 0;
param.block.z = 1;
param.grid.z = coarseColor / coarse_colors_per_thread;
param.aux.x = 1; // swizzle factor
}
long long flops() const { return 8 * fineSpin * fineColor * coarseColor * arg.nParity*arg.in.VolumeCB(); }
long long bytes() const {
size_t v_bytes = arg.V.Bytes() / (arg.V.Nparity() == arg.in.Nparity() ? 1 : 2);
return arg.in.Bytes() + arg.out.Bytes() + v_bytes + arg.nParity*arg.in.VolumeCB()*sizeof(int);
}
};
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
const int *fine_to_coarse, const int *coarse_to_fine, int parity) {
typedef FieldOrderCB<Float,fineSpin,fineColor,1,order> fineSpinor;
typedef FieldOrderCB<Float,coarseSpin,coarseColor,1,order> coarseSpinor;
typedef FieldOrderCB<Float,fineSpin,fineColor,coarseColor,order> packedSpinor;
typedef RestrictArg<coarseSpinor,fineSpinor,packedSpinor,fineSpin,coarseSpin> Arg;
coarseSpinor Out(const_cast<ColorSpinorField&>(out));
fineSpinor In(const_cast<ColorSpinorField&>(in));
packedSpinor V(const_cast<ColorSpinorField&>(v));
// fine grids (Nc=3) expose more parallelism, so more coarse colors can be handled per thread
constexpr int coarse_colors_per_thread = fineColor == 3 ? 8 : 2;
Arg arg(Out, In, V, fine_to_coarse, coarse_to_fine, parity, in);
RestrictLaunch<Float, Arg, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(arg, out, in, Location(out, in, v));
restrictor.apply(0);
if (Location(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, int fineSpin, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int nVec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
if (out.Nspin() != 2) errorQuda("Unsupported nSpin %d", out.Nspin());
const int coarseSpin = 2;
// first check that the spin_map matches the spin_mapper
spin_mapper<fineSpin,coarseSpin> mapper;
for (int s=0; s<fineSpin; s++)
if (mapper(s) != spin_map[s]) errorQuda("Spin map does not match spin_mapper");
// Template over fine color
if (in.Ncolor() == 3) { // standard QCD
const int fineColor = 3;
if (nVec == 2) {
Restrict<Float,fineSpin,fineColor,coarseSpin,2,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 4) {
Restrict<Float,fineSpin,fineColor,coarseSpin,4,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 24) {
Restrict<Float,fineSpin,fineColor,coarseSpin,24,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 32) {
Restrict<Float,fineSpin,fineColor,coarseSpin,32,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (in.Ncolor() == 2) {
const int fineColor = 2;
if (nVec == 2) { // these are probably only for debugging
Restrict<Float,fineSpin,fineColor,coarseSpin,2,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 4) {
Restrict<Float,fineSpin,fineColor,coarseSpin,4,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (in.Ncolor() == 24) { // to keep compilation under control coarse grids have same or more colors
const int fineColor = 24;
if (nVec == 24) {
Restrict<Float,fineSpin,fineColor,coarseSpin,24,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 32) {
Restrict<Float,fineSpin,fineColor,coarseSpin,32,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (in.Ncolor() == 32) {
const int fineColor = 32;
if (nVec == 32) {
Restrict<Float,fineSpin,fineColor,coarseSpin,32,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else {
errorQuda("Unsupported nColor %d", in.Ncolor());
}
}
template <typename Float, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
if (in.Nspin() == 4) {
Restrict<Float,4,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else if (in.Nspin() == 2) {
Restrict<Float,2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
#if GPU_STAGGERED_DIRAC
} else if (in.Nspin() == 1) {
Restrict<Float,1,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
#endif
} else {
errorQuda("Unsupported nSpin %d", in.Nspin());
}
}
template <typename Float>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder())
errorQuda("Field orders do not match (out=%d, in=%d, v=%d)",
out.FieldOrder(), in.FieldOrder(), v.FieldOrder());
if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
Restrict<Float,QUDA_FLOAT2_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
Restrict<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else {
errorQuda("Unsupported field type %d", out.FieldOrder());
}
}
#endif // GPU_MULTIGRID
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
#ifdef GPU_MULTIGRID
if (out.Precision() != in.Precision() || v.Precision() != in.Precision())
errorQuda("Precision mismatch out=%d in=%d v=%d", out.Precision(), in.Precision(), v.Precision());
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
Restrict<double>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
Restrict<float>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else {
errorQuda("Unsupported precision %d", out.Precision());
}
#else
errorQuda("Multigrid has not been built");
#endif
}
} // namespace quda
| a289327a76b519c20fd425ac53b64ded1882034d.cu | #include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <cub/cub.cuh>
#include <typeinfo>
#include <multigrid_helper.cuh>
// enabling CTA swizzling improves spatial locality of MG blocks reducing cache line wastage
#define SWIZZLE
namespace quda {
#ifdef GPU_MULTIGRID
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
template <typename Out, typename In, typename Rotator, int fineSpin, int coarseSpin>
struct RestrictArg {
Out out;
const In in;
const Rotator V;
const int *fine_to_coarse;
const int *coarse_to_fine;
const spin_mapper<fineSpin,coarseSpin> spin_map;
const int parity; // the parity of the input field (if single parity)
const int nParity; // number of parities of input fine field
int swizzle; // swizzle factor for transposing blockIdx.x mapping to coarse grid coordinate
RestrictArg(Out &out, const In &in, const Rotator &V,
const int *fine_to_coarse, const int *coarse_to_fine,
int parity, const ColorSpinorField &meta) :
out(out), in(in), V(V), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine),
spin_map(), parity(parity), nParity(meta.SiteSubset()), swizzle(1)
{ }
RestrictArg(const RestrictArg<Out,In,Rotator,fineSpin,coarseSpin> &arg) :
out(arg.out), in(arg.in), V(arg.V),
fine_to_coarse(arg.fine_to_coarse), coarse_to_fine(arg.coarse_to_fine), spin_map(),
parity(arg.parity), nParity(arg.nParity), swizzle(arg.swizzle)
{ }
};
/**
Rotates from the fine-color basis into the coarse-color basis.
*/
template <typename Float, int fineSpin, int fineColor, int coarseColor, int coarse_colors_per_thread,
class FineColor, class Rotator>
__device__ __host__ inline void rotateCoarseColor(complex<Float> out[fineSpin*coarse_colors_per_thread],
const FineColor &in, const Rotator &V,
int parity, int nParity, int x_cb, int coarse_color_block) {
const int spinor_parity = (nParity == 2) ? parity : 0;
const int v_parity = (V.Nparity() == 2) ? parity : 0;
#pragma unroll
for (int s=0; s<fineSpin; s++)
#pragma unroll
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
out[s*coarse_colors_per_thread+coarse_color_local] = 0.0;
}
#pragma unroll
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int i = coarse_color_block + coarse_color_local;
#pragma unroll
for (int s=0; s<fineSpin; s++) {
constexpr int color_unroll = fineColor == 3 ? 3 : 2;
complex<Float> partial[color_unroll];
#pragma unroll
for (int k=0; k<color_unroll; k++) partial[k] = 0.0;
#pragma unroll
for (int j=0; j<fineColor; j+=color_unroll) {
#pragma unroll
for (int k=0; k<color_unroll; k++)
partial[k] += conj(V(v_parity, x_cb, s, j+k, i)) * in(spinor_parity, x_cb, s, j+k);
}
#pragma unroll
for (int k=0; k<color_unroll; k++) out[s*coarse_colors_per_thread + coarse_color_local] += partial[k];
}
}
}
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread, typename Arg>
void Restrict(Arg arg) {
for (int parity_coarse=0; parity_coarse<2; parity_coarse++)
for (int x_coarse_cb=0; x_coarse_cb<arg.out.VolumeCB(); x_coarse_cb++)
for (int s=0; s<coarseSpin; s++)
for (int c=0; c<coarseColor; c++)
arg.out(parity_coarse, x_coarse_cb, s, c) = 0.0;
// loop over fine degrees of freedom
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb=0; x_cb<arg.in.VolumeCB(); x_cb++) {
int x = parity*arg.in.VolumeCB() + x_cb;
int x_coarse = arg.fine_to_coarse[x];
int parity_coarse = (x_coarse >= arg.out.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
for (int coarse_color_block=0; coarse_color_block<coarseColor; coarse_color_block+=coarse_colors_per_thread) {
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>
(tmp, arg.in, arg.V, parity, arg.nParity, x_cb, coarse_color_block);
for (int s=0; s<fineSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int c = coarse_color_block + coarse_color_local;
arg.out(parity_coarse,x_coarse_cb,arg.spin_map(s),c) += tmp[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
}
}
/**
struct which acts as a wrapper to a vector of data.
*/
template <typename scalar, int n>
struct vector_type {
scalar data[n];
__device__ __host__ inline scalar& operator[](int i) { return data[i]; }
__device__ __host__ inline const scalar& operator[](int i) const { return data[i]; }
__device__ __host__ inline static constexpr int size() { return n; }
__device__ __host__ vector_type() { for (int i=0; i<n; i++) data[i] = 0.0; }
};
/**
functor that defines how to do a multi-vector reduction
*/
template <typename T>
struct reduce {
__device__ __host__ inline T operator()(const T &a, const T &b) {
T sum;
for (int i=0; i<sum.size(); i++) sum[i] = a[i] + b[i];
return sum;
}
};
/**
Here, we ensure that each thread block maps exactly to a
geometric block. Each thread block corresponds to one geometric
block, with number of threads equal to the number of fine grid
points per aggregate, so each thread represents a fine-grid
point. The look-up table coarse_to_fine maps each thread in the
block to its fine grid point.
*/
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread,
typename Arg, int block_size>
__global__ void RestrictKernel(Arg arg) {
#ifdef SWIZZLE
// the portion of the grid that is exactly divisible by the number of SMs
const int gridp = gridDim.x - gridDim.x % arg.swizzle;
int x_coarse = blockIdx.x;
if (blockIdx.x < gridp) {
// this is the portion of the block that we are going to transpose
const int i = blockIdx.x % arg.swizzle;
const int j = blockIdx.x / arg.swizzle;
// transpose the coordinates
x_coarse = i * (gridp / arg.swizzle) + j;
}
#else
int x_coarse = blockIdx.x;
#endif
int parity_coarse = x_coarse >= arg.out.VolumeCB() ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
// obtain fine index from this look up table
// since both parities map to the same block, each thread block must do both parities
// threadIdx.x - fine checkerboard offset
// threadIdx.y - fine parity offset
// blockIdx.x - which coarse block are we working on (swizzled to improve cache efficiency)
// assume that coarse_to_fine look up map is ordered as (coarse-block-id + fine-point-id)
// and that fine-point-id is parity ordered
int parity = arg.nParity == 2 ? threadIdx.y : arg.parity;
int x_fine = arg.coarse_to_fine[ (x_coarse*2 + parity) * blockDim.x + threadIdx.x];
int x_fine_cb = x_fine - parity*arg.in.VolumeCB();
int coarse_color_block = (blockDim.z*blockIdx.z + threadIdx.z) * coarse_colors_per_thread;
if (coarse_color_block >= coarseColor) return;
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>
(tmp, arg.in, arg.V, parity, arg.nParity, x_fine_cb, coarse_color_block);
typedef vector_type<complex<Float>, coarseSpin*coarse_colors_per_thread> vector;
vector reduced;
// first lets coarsen spin locally
for (int s=0; s<fineSpin; s++) {
for (int v=0; v<coarse_colors_per_thread; v++) {
reduced[arg.spin_map(s)*coarse_colors_per_thread+v] += tmp[s*coarse_colors_per_thread+v];
}
}
// now lets coarsen geometry across threads
if (arg.nParity == 2) {
typedef cub::BlockReduce<vector, block_size, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 2> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
reduce<vector> reducer; // reduce functor
// note this is not safe for blockDim.z > 1
reduced = BlockReduce(temp_storage).Reduce(reduced, reducer);
} else {
typedef cub::BlockReduce<vector, block_size, cub::BLOCK_REDUCE_WARP_REDUCTIONS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
reduce<vector> reducer; // reduce functor
// note this is not safe for blockDim.z > 1
reduced = BlockReduce(temp_storage).Reduce(reduced, reducer);
}
if (threadIdx.x==0 && threadIdx.y == 0) {
for (int s=0; s<coarseSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int v = coarse_color_block + coarse_color_local;
arg.out(parity_coarse, x_coarse_cb, s, v) = reduced[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
template <typename Float, typename Arg, int fineSpin, int fineColor, int coarseSpin, int coarseColor,
int coarse_colors_per_thread>
class RestrictLaunch : public Tunable {
protected:
Arg &arg;
QudaFieldLocation location;
const int block_size;
char vol[TuneKey::volume_n];
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.in.VolumeCB(); } // fine parity is the block y dimension
public:
RestrictLaunch(Arg &arg, const ColorSpinorField &coarse, const ColorSpinorField &fine,
const QudaFieldLocation location)
: arg(arg), location(location), block_size((arg.in.VolumeCB())/(2*arg.out.VolumeCB())) {
strcpy(vol, coarse.VolString());
strcat(vol, ",");
strcat(vol, fine.VolString());
strcpy(aux, coarse.AuxString());
strcat(aux, ",");
strcat(aux, fine.AuxString());
} // block size is checkerboard fine length / full coarse length
virtual ~RestrictLaunch() { }
void apply(const cudaStream_t &stream) {
if (location == QUDA_CPU_FIELD_LOCATION) {
Restrict<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
arg.swizzle = tp.aux.x;
if (block_size == 8) { // for 2x2x2x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,8>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 16) { // for 4x2x2x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,16>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 27) { // for 3x3x3x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,27>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 32) { // for 4x4x2x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,32>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 36) { // for 3x3x4x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,36>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 40) { // for 5x4x2x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,40>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 54) { // for 3x3x3x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,54>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 64) { // for 4x4x2x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,64>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 100) { // for 5x5x2x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,100>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 108) { // for 6x3x3x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,108>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 128) { // for 4x4x4x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,128>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 200) { // for 5x5x2x8 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,200>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 216) { // for 6x6x6x2 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,216>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 256) { // for 4x4x4x8 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,256>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
#if __COMPUTE_CAPABILITY__ >= 300
} else if (block_size == 432) { // for 6x6x6x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,432>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 500) { // 5x5x5x8 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,500>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 512) { // 8x8x4x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,512>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 648) { // 6x6x6x6 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,648>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 768) { // 8x8x6x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,768>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 1024) { // 8x8x8x4 aggregates
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,1024>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
#endif
} else {
errorQuda("Block size %d not instantiated", block_size);
}
}
}
// This block tuning tunes for the optimal amount of color
// splitting between blockDim.z and gridDim.z. However, enabling
// blockDim.z > 1 gives incorrect results due to cub reductions
// being unable to do independent sliced reductions along
// blockDim.z. So for now we only split between colors per thread
// and grid.z.
bool advanceBlockDim(TuneParam &param) const
{
// let's try to advance spin/block-color
while(param.block.z <= coarseColor/coarse_colors_per_thread) {
param.block.z++;
if ( (coarseColor/coarse_colors_per_thread) % param.block.z == 0) {
param.grid.z = (coarseColor/coarse_colors_per_thread) / param.block.z;
break;
}
}
// we can advance spin/block-color since this is valid
if (param.block.z <= (coarseColor/coarse_colors_per_thread) ) { //
return true;
} else { // we have run off the end so let's reset
param.block.z = 1;
param.grid.z = coarseColor/coarse_colors_per_thread;
return false;
}
}
int tuningIter() const { return 3; }
bool advanceAux(TuneParam &param) const
{
#ifdef SWIZZLE
if (param.aux.x < 2*deviceProp.multiProcessorCount) {
param.aux.x++;
return true;
} else {
param.aux.x = 1;
return false;
}
#else
return false;
#endif
}
// only tune shared memory per thread (disable tuning for block.z for now)
bool advanceTuneParam(TuneParam &param) const { return advanceSharedBytes(param) || advanceAux(param); }
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
void initTuneParam(TuneParam &param) const { defaultTuneParam(param); }
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const {
param.block = dim3(block_size, arg.nParity, 1);
param.grid = dim3( (minThreads()+param.block.x-1) / param.block.x, 1, 1);
param.shared_bytes = 0;
param.block.z = 1;
param.grid.z = coarseColor / coarse_colors_per_thread;
param.aux.x = 1; // swizzle factor
}
long long flops() const { return 8 * fineSpin * fineColor * coarseColor * arg.nParity*arg.in.VolumeCB(); }
long long bytes() const {
size_t v_bytes = arg.V.Bytes() / (arg.V.Nparity() == arg.in.Nparity() ? 1 : 2);
return arg.in.Bytes() + arg.out.Bytes() + v_bytes + arg.nParity*arg.in.VolumeCB()*sizeof(int);
}
};
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
const int *fine_to_coarse, const int *coarse_to_fine, int parity) {
typedef FieldOrderCB<Float,fineSpin,fineColor,1,order> fineSpinor;
typedef FieldOrderCB<Float,coarseSpin,coarseColor,1,order> coarseSpinor;
typedef FieldOrderCB<Float,fineSpin,fineColor,coarseColor,order> packedSpinor;
typedef RestrictArg<coarseSpinor,fineSpinor,packedSpinor,fineSpin,coarseSpin> Arg;
coarseSpinor Out(const_cast<ColorSpinorField&>(out));
fineSpinor In(const_cast<ColorSpinorField&>(in));
packedSpinor V(const_cast<ColorSpinorField&>(v));
// fine grids (Nc=3) expose more parallelism, so more coarse colors can be handled per thread
constexpr int coarse_colors_per_thread = fineColor == 3 ? 8 : 2;
Arg arg(Out, In, V, fine_to_coarse, coarse_to_fine, parity, in);
RestrictLaunch<Float, Arg, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(arg, out, in, Location(out, in, v));
restrictor.apply(0);
if (Location(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, int fineSpin, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int nVec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
if (out.Nspin() != 2) errorQuda("Unsupported nSpin %d", out.Nspin());
const int coarseSpin = 2;
// first check that the spin_map matches the spin_mapper
spin_mapper<fineSpin,coarseSpin> mapper;
for (int s=0; s<fineSpin; s++)
if (mapper(s) != spin_map[s]) errorQuda("Spin map does not match spin_mapper");
// Template over fine color
if (in.Ncolor() == 3) { // standard QCD
const int fineColor = 3;
if (nVec == 2) {
Restrict<Float,fineSpin,fineColor,coarseSpin,2,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 4) {
Restrict<Float,fineSpin,fineColor,coarseSpin,4,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 24) {
Restrict<Float,fineSpin,fineColor,coarseSpin,24,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 32) {
Restrict<Float,fineSpin,fineColor,coarseSpin,32,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (in.Ncolor() == 2) {
const int fineColor = 2;
if (nVec == 2) { // these are probably only for debugging
Restrict<Float,fineSpin,fineColor,coarseSpin,2,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 4) {
Restrict<Float,fineSpin,fineColor,coarseSpin,4,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (in.Ncolor() == 24) { // to keep compilation under control coarse grids have same or more colors
const int fineColor = 24;
if (nVec == 24) {
Restrict<Float,fineSpin,fineColor,coarseSpin,24,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else if (nVec == 32) {
Restrict<Float,fineSpin,fineColor,coarseSpin,32,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (in.Ncolor() == 32) {
const int fineColor = 32;
if (nVec == 32) {
Restrict<Float,fineSpin,fineColor,coarseSpin,32,order>(out, in, v, fine_to_coarse, coarse_to_fine, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else {
errorQuda("Unsupported nColor %d", in.Ncolor());
}
}
template <typename Float, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
if (in.Nspin() == 4) {
Restrict<Float,4,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else if (in.Nspin() == 2) {
Restrict<Float,2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
#if GPU_STAGGERED_DIRAC
} else if (in.Nspin() == 1) {
Restrict<Float,1,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
#endif
} else {
errorQuda("Unsupported nSpin %d", in.Nspin());
}
}
template <typename Float>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder())
errorQuda("Field orders do not match (out=%d, in=%d, v=%d)",
out.FieldOrder(), in.FieldOrder(), v.FieldOrder());
if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
Restrict<Float,QUDA_FLOAT2_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
Restrict<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else {
errorQuda("Unsupported field type %d", out.FieldOrder());
}
}
#endif // GPU_MULTIGRID
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map, int parity) {
#ifdef GPU_MULTIGRID
if (out.Precision() != in.Precision() || v.Precision() != in.Precision())
errorQuda("Precision mismatch out=%d in=%d v=%d", out.Precision(), in.Precision(), v.Precision());
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
Restrict<double>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
Restrict<float>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity);
} else {
errorQuda("Unsupported precision %d", out.Precision());
}
#else
errorQuda("Multigrid has not been built");
#endif
}
} // namespace quda
|
7ef47fb36e16d6e7639b46c9db32c714acb64bc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
c* Multimodal Deformable Image Registration *
c* via Mutual Information or Bhattacharyya Distance *
c* Version: 1.0 *
c* Language: C, CUDA *
c* *
c* Developer: Yifei Lou *
c* Email: [email protected] *
c* *
c* School of Electrical and Computer Engineering *
c* Georgia Institute of Technology *
c* Atlanta, GA, 30318 *
c* Website: http://groups.bme.gatech.edu/groups/bil/ *
c* *
c* Copyright (c) 2011 *
c* All rights reserved. *
c* *
c* Permission to use, copy, or modify this code and its *
c* documentation for scientific purpose is hereby granted *
c* without fee, provided that this copyright notice appear in *
c* all copies and that both that copyright notice and this *
c* permission notice appear in supporting documentation. The use *
c* for commercial purposes is prohibited without permission. *
c* *
c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR *
c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT *
c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF*
c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED *
c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *
c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF *
c* THE POSSIBILITY OF SUCH DAMAGE. *
c* *
c******************************************************************/
/*******************************************************************
c* Short description *
c* main function to register two images on the current scale *
c* including upsample and downsample *
c******************************************************************/
#ifndef _FUN_COMPUTE_CU_
#define _FUN_COMPUTE_CU_
// hash a point in the unit square to the index of
// the grid bucket that contains it
struct point_to_bucket_index : public thrust::unary_function<float2,unsigned int>
{
__host__ __device__
point_to_bucket_index(unsigned int width, unsigned int height)
:w(width),h(height){}
__host__ __device__
unsigned int operator()(float2 p) const
{
// find the raster indices of p's bucket
unsigned int x = static_cast<unsigned int>(p.x * (w-1));
unsigned int y = static_cast<unsigned int>(p.y * (h-1));
// return the bucket's linear index
return y * w + x;
}
unsigned int w, h;
};
__global__ void downSample(float *src, float *dest, int NX, int NY, int NZ, int s)
{
const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x;
if(tid < NX*NY*NZ)
{
int z = tid/(NX*NY);
int y = (tid%(NX*NY))/NX;
int x = tid%NX;
float sum =0.0f;
for(int xs = 0; xs<s; xs++)
for(int ys =0; ys<s; ys++)
sum += src[s*x+xs + (s*y+ys)*NX0 + s*z*NX0*NY0];
dest[tid] = sum/s/s;
}
}
__global__ void upSample(float *src, float *dest, int NX, int NY, int NZ)
// upsampling
{
const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x;
if(tid < NX*NY*NZ)
{
int z = tid/(NX*NY);
int y = (tid%(NX*NY))/NX;
int x = tid%NX;
int xmin = x/2 - (x%2 == 0);
int xmax = x/2 + (x%2 == 1);
int ymin = y/2 - (y%2 == 0);
int ymax = y/2 + (y%2 == 1);
int zmin = z/2 - (z%2 == 0);
int zmax = z/2 + (z%2 == 1);
xmin = (xmin < 0) ? 0: xmin;
ymin = (ymin < 0) ? 0: ymin;
zmin = (zmin < 0) ? 0: zmin;
xmax = (xmax < NX)? xmax : NX-1;
ymax = (ymax < NY)? ymax : NY-1;
zmax = (zmax < NZ)? zmax : NZ-1;
float wx = 0.25 + 0.5*(x%2==0);
float wy = 0.25 + 0.5*(y%2==0);
float wz = 0.25 + 0.5*(z%2==0);
dest[tid] = src[xmin + ymin*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * (1.0-wz) +
src[xmax + ymin*NX/2 + zmin*NX/2*NY/2] * wx * (1.0-wy) * (1.0-wz) +
src[xmin + ymax*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * wy * (1.0-wz) +
src[xmax + ymax*NX/2 + zmin*NX/2*NY/2] * wx * wy * (1.0-wz) +
src[xmin + ymin*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * wz +
src[xmax + ymin*NX/2 + zmax*NX/2*NY/2] * wx * (1.0-wy) * wz +
src[xmin + ymax*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * wy * wz +
src[xmax + ymax*NX/2 + zmax*NX/2*NY/2] * wx * wy * wz;
dest[tid] = 2*dest[tid];
}
}
void compute(float *d_im_move, float *d_im_static, float *d_mv_x, float *d_mv_y, float *d_mv_z, int maxIter)
// d_mv_x, d_mv_y and d_im_move are updated
{
// bind moving image to texture
const hipExtent volumeSize = make_hipExtent(NX, NY, NZ);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
cutilSafeCall( hipMalloc3DArray(&d_im_move_array, &channelDesc, volumeSize) );
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_im_move_array;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams) );
d_im_move_tex.normalized = false;
d_im_move_tex.filterMode = hipFilterModeLinear;
cutilSafeCall(hipBindTextureToArray(d_im_move_tex, d_im_move_array, channelDesc));
// bind vector flows to texture
cutilSafeCall( hipMalloc3DArray(&d_mv_x_array, &channelDesc, volumeSize) );
hipMemcpy3DParms copyParams_x = {0};
copyParams_x.srcPtr = make_hipPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_x.dstArray = d_mv_x_array;
copyParams_x.extent = volumeSize;
copyParams_x.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams_x) );
d_mv_x_tex.normalized = false;
d_mv_x_tex.filterMode = hipFilterModeLinear;
cutilSafeCall( hipMalloc3DArray(&d_mv_y_array, &channelDesc, volumeSize) );
hipMemcpy3DParms copyParams_y = {0};
copyParams_y.srcPtr = make_hipPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_y.dstArray = d_mv_y_array;
copyParams_y.extent = volumeSize;
copyParams_y.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams_y) );
d_mv_y_tex.normalized = false;
d_mv_y_tex.filterMode = hipFilterModeLinear;
cutilSafeCall( hipMalloc3DArray(&d_mv_z_array, &channelDesc, volumeSize) );
hipMemcpy3DParms copyParams_z = {0};
copyParams_z.srcPtr = make_hipPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_z.dstArray = d_mv_z_array;
copyParams_z.extent = volumeSize;
copyParams_z.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams_z) );
d_mv_z_tex.normalized = false;
d_mv_z_tex.filterMode = hipFilterModeLinear;
float *d_im_out;
cutilSafeCall( hipMalloc((void **)&d_im_out, sDATA_SIZE) );
// velocity
float *d_v_x, *d_v_x_copy;
float *d_v_y, *d_v_y_copy;
float *d_v_z, *d_v_z_copy;
cutilSafeCall( hipMalloc((void **)&d_v_x, sDATA_SIZE) );
cutilSafeCall( hipMalloc((void **)&d_v_y, sDATA_SIZE) );
cutilSafeCall( hipMalloc((void **)&d_v_z, sDATA_SIZE) );
cutilSafeCall( hipMalloc((void **)&d_v_x_copy, sDATA_SIZE) );
cutilSafeCall( hipMalloc((void **)&d_v_y_copy, sDATA_SIZE) );
cutilSafeCall( hipMalloc((void **)&d_v_z_copy, sDATA_SIZE) );
// setup for computing joint histogram via thrust
// the grid data structure keeps a range per grid bucket:
// each bucket_begin[i] indexes the first element of bucket i's list of points
// each bucket_end[i] indexes one past the last element of bucket i's list of points
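// Illustrative worked example (editor's note, not in the original code), assuming nBin = 8:
// an intensity pair (0.30, 0.75) is hashed by the point_to_bucket_index functor above to
// x = (unsigned)(0.30 * 7) = 2, y = (unsigned)(0.75 * 7) = 5, bucket = 5*8 + 2 = 42;
// after the sort below, lower_bound/upper_bound delimit bucket 42's range, and the
// difference of the two bounds is that bin's joint-histogram count.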
thrust::device_vector<unsigned int> bucket_begin(nBin*nBin);
thrust::device_vector<unsigned int> bucket_end(nBin*nBin);
// allocate storage for each point's bucket index
thrust::device_vector<unsigned int> bucket_indices(NX*NY*NZ);
// allocate space to hold per-bucket sizes
thrust::device_vector<unsigned int> bucket_sizes(nBin*nBin);
// allocate float2 vector
float2 *d_points;
hipMalloc((void**) &d_points, sizeof(float2)*NX*NY*NZ);
int regrid = 0;
float MI[1000];
int3 Dims;
Dims.x = NX;
Dims.y = NY;
Dims.z = NZ;
for(int it=0; it<maxIter; it++)
{
// update image
hipLaunchKernelGGL(( ImageWarp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_im_out, NX, NY, NZ);
// joint histogram via thrust ----- begin
// convert to float2 vector
hipLaunchKernelGGL(( transToFloat2), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_im_out, d_im_static, d_points, NX*NY*NZ);
// use a thrust ptr to wrap the raw pointer
thrust::device_ptr<float2> points_t(d_points);
// transform the points to their bucket indices
thrust::transform(points_t, points_t+NX*NY*NZ, bucket_indices.begin(), point_to_bucket_index(nBin,nBin));
// sort the bucket index
thrust::sort(bucket_indices.begin(), bucket_indices.end());
// find the beginning of each bucket's list of points
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(bucket_indices.begin(), bucket_indices.end(), search_begin,
search_begin + nBin*nBin, bucket_begin.begin());
// find the end of each bucket's list of points
thrust::upper_bound(bucket_indices.begin(), bucket_indices.end(), search_begin,
search_begin + nBin*nBin, bucket_end.begin());
// take the difference between bounds to find each bucket size
thrust::transform(bucket_end.begin(), bucket_end.end(), bucket_begin.begin(),
bucket_sizes.begin(), thrust :: minus<unsigned int>());
// now hist contains the histogram
unsigned int *hist = thrust::raw_pointer_cast(&bucket_sizes[0]);
hipLaunchKernelGGL(( copyHist), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, hist, d_jointHistogram);
// joint histogram via thrust ----- end
// compute the convolution of joint histogram
hipLaunchKernelGGL(( myconv2dGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram, d_jointHistogram_conv, GaussKernelH, nBin, nBin, 3*hValue);
// normalize joint histogram
float sum = hipblasSasum (nBin*nBin, d_jointHistogram_conv , 1);
hipblasSscal (nBin*nBin, 1.0f/sum, d_jointHistogram_conv, 1);
// compute mutual info by GPU
hipLaunchKernelGGL(( marginalDist), dim3(nBin), dim3(nBin), 0, 0, d_jointHistogram_conv, d_probx, d_proby);
switch (METHOD)
{
case 1:
hipLaunchKernelGGL(( marginalBnorm_sum), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram);
hipLaunchKernelGGL(( marginalDistAlongY), dim3(nBin), dim3(nBin), 0, 0, d_jointHistogram, d_Bsum);
hipLaunchKernelGGL(( BnormGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram_conv, d_probx, d_proby,d_Bsum, d_jointHistogram);
break;
case 2:
hipLaunchKernelGGL(( mutualInfoGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram);
break;
}
MI[it] = hipblasSasum (nBin*nBin, d_jointHistogram_conv, 1);
printf("mutual information (%d)= %f\n", it, MI[it]);
// NOTE: after this step, jointHistogram becomes the likelihood
// compute the first derivative w.r.t. x-dim of joint histogram
hipLaunchKernelGGL(( myconv2dGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram, d_jointHistogram_conv, GaussKernelHx, nBin, nBin,3*hValue);
// compute the force
hipLaunchKernelGGL(( forceComp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_im_out, d_im_static, d_jointHistogram_conv, d_v_x, d_v_y, d_v_z, NX, NY, NZ);
ImageSmooth(d_v_x, d_v_x_copy,Dims);
ImageSmooth(d_v_y, d_v_y_copy,Dims);
ImageSmooth(d_v_z, d_v_z_copy,Dims);
hipLaunchKernelGGL(( flowComp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy, d_v_x, d_v_y, NX, NY, NZ);
// NOTE: d_v_x is Jacobian, d_v_y is the max flow
// d_v_x_copy, d_v_y_copy, d_v_z_copy are the displacement
thrust :: device_ptr<float> data_ptr(d_v_y);
int maxInd = hipblasIsamax(NX*NY*NZ, d_v_y, 1) -1;
float maxflow = data_ptr[maxInd];
float dt = (du/maxflow); // > 1) ? 1 : du/maxflow;
printf("dt = %f \n", dt);
hipLaunchKernelGGL(( flowUpdate), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy,dt, NX, NY, NZ);
// regridding if Jacobian < threshJaco
sum = hipblasSasum(NX*NY*NZ, d_v_x, 1);
if (sum>0.5)
{
regrid ++;
printf("regrid = %d\n", regrid);
// save d_im_move to be d_im_out
hipUnbindTexture(d_im_move_tex);
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)d_im_out, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_im_move_array;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams) );
cutilSafeCall(hipBindTextureToArray(d_im_move_tex, d_im_move_array));
// update vector flow
hipLaunchKernelGGL(( ImageWarp_mv), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, NX, NY, NZ);
hipMemcpy3DParms copyParams_x = {0};
copyParams_x.srcPtr = make_hipPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_x.dstArray = d_mv_x_array;
copyParams_x.extent = volumeSize;
copyParams_x.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams_x) );
cutilSafeCall(hipBindTextureToArray(d_mv_x_tex, d_mv_x_array));
hipMemcpy3DParms copyParams_y = {0};
copyParams_y.srcPtr = make_hipPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_y.dstArray = d_mv_y_array;
copyParams_y.extent = volumeSize;
copyParams_y.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams_y) );
cutilSafeCall(hipBindTextureToArray(d_mv_y_tex, d_mv_y_array));
hipMemcpy3DParms copyParams_z = {0};
copyParams_z.srcPtr = make_hipPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_z.dstArray = d_mv_z_array;
copyParams_z.extent = volumeSize;
copyParams_z.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams_z) );
cutilSafeCall(hipBindTextureToArray(d_mv_z_tex, d_mv_z_array));
cutilSafeCall( hipMemset(d_mv_x, 0, sDATA_SIZE) );
cutilSafeCall( hipMemset(d_mv_y, 0, sDATA_SIZE) );
cutilSafeCall( hipMemset(d_mv_z, 0, sDATA_SIZE) );
} // end for regridding
} // for-loop iteration
if (!regrid)
{
hipLaunchKernelGGL(( ImageWarp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_im_move, NX, NY, NZ);
}
else
{
hipMemcpy3DParms copyParams = {0};
hipUnbindTexture(d_im_move_tex);
copyParams.srcPtr = make_hipPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_im_move_array;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyDeviceToDevice;
cutilSafeCall( hipMemcpy3D(&copyParams) );
cutilSafeCall(hipBindTextureToArray(d_im_move_tex, d_im_move_array));
hipLaunchKernelGGL(( ImageWarp_final), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z,d_im_move, NX, NY, NZ);
}
hipFree(d_points);
hipFree(d_v_x);
hipFree(d_v_y);
hipFree(d_v_z);
hipFree(d_v_x_copy);
hipFree(d_v_y_copy);
hipFree(d_v_z_copy);
hipUnbindTexture(d_im_move_tex);
hipFreeArray(d_im_move_array);
hipUnbindTexture(d_mv_x_tex);
hipFreeArray(d_mv_x_array);
hipUnbindTexture(d_mv_y_tex);
hipFreeArray(d_mv_y_array);
hipUnbindTexture(d_mv_z_tex);
hipFreeArray(d_mv_z_array);
hipFree(d_im_out);
}
__global__ void transToFloat2(const float *input1, const float *input2, float2 *output, const int n)
{
const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x;
// obtain current id on thread
if (tid < n)
{
output[tid] = make_float2(input1[tid], input2[tid]);
}
}
#endif
| 7ef47fb36e16d6e7639b46c9db32c714acb64bc7.cu | /*******************************************************************
c* Multimodal Deformable Image Registration *
c* via Mutual Information or Bhattacharyya Distance *
c* Version: 1.0 *
c* Language: C, CUDA *
c* *
c* Developer: Yifei Lou *
c* Email: [email protected] *
c* *
c* School of Electrical and Computer Engineering *
c* Georgia Institute of Technology *
c* Atlanta, GA, 30318 *
c* Website: http://groups.bme.gatech.edu/groups/bil/ *
c* *
c* Copyright (c) 2011 *
c* All rights reserved. *
c* *
c* Permission to use, copy, or modify this code and its *
c* documentation for scientific purpose is hereby granted *
c* without fee, provided that this copyright notice appear in *
c* all copies and that both that copyright notice and this *
c* permission notice appear in supporting documentation. The use *
c* for commercial purposes is prohibited without permission. *
c* *
c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR *
c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT *
c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF*
c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED *
c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *
c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF *
c* THE POSSIBILITY OF SUCH DAMAGE. *
c* *
c******************************************************************/
/*******************************************************************
c* Short description *
c* main function to register two images on the current scale *
c* including upsample and downsample *
c******************************************************************/
#ifndef _FUN_COMPUTE_CU_
#define _FUN_COMPUTE_CU_
// hash a point in the unit square to the index of
// the grid bucket that contains it
struct point_to_bucket_index : public thrust::unary_function<float2,unsigned int>
{
__host__ __device__
point_to_bucket_index(unsigned int width, unsigned int height)
:w(width),h(height){}
__host__ __device__
unsigned int operator()(float2 p) const
{
// find the raster indices of p's bucket
unsigned int x = static_cast<unsigned int>(p.x * (w-1));
unsigned int y = static_cast<unsigned int>(p.y * (h-1));
// return the bucket's linear index
return y * w + x;
}
unsigned int w, h;
};
__global__ void downSample(float *src, float *dest, int NX, int NY, int NZ, int s)
{
const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x;
if(tid < NX*NY*NZ)
{
int z = tid/(NX*NY);
int y = (tid%(NX*NY))/NX;
int x = tid%NX;
float sum =0.0f;
for(int xs = 0; xs<s; xs++)
for(int ys =0; ys<s; ys++)
sum += src[s*x+xs + (s*y+ys)*NX0 + s*z*NX0*NY0];
dest[tid] = sum/s/s;
}
}
__global__ void upSample(float *src, float *dest, int NX, int NY, int NZ)
// upsampling
{
const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x;
if(tid < NX*NY*NZ)
{
int z = tid/(NX*NY);
int y = (tid%(NX*NY))/NX;
int x = tid%NX;
int xmin = x/2 - (x%2 == 0);
int xmax = x/2 + (x%2 == 1);
int ymin = y/2 - (y%2 == 0);
int ymax = y/2 + (y%2 == 1);
int zmin = z/2 - (z%2 == 0);
int zmax = z/2 + (z%2 == 1);
xmin = (xmin < 0) ? 0: xmin;
ymin = (ymin < 0) ? 0: ymin;
zmin = (zmin < 0) ? 0: zmin;
xmax = (xmax < NX)? xmax : NX-1;
ymax = (ymax < NY)? ymax : NY-1;
zmax = (zmax < NZ)? zmax : NZ-1;
float wx = 0.25 + 0.5*(x%2==0);
float wy = 0.25 + 0.5*(y%2==0);
float wz = 0.25 + 0.5*(z%2==0);
dest[tid] = src[xmin + ymin*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * (1.0-wz) +
src[xmax + ymin*NX/2 + zmin*NX/2*NY/2] * wx * (1.0-wy) * (1.0-wz) +
src[xmin + ymax*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * wy * (1.0-wz) +
src[xmax + ymax*NX/2 + zmin*NX/2*NY/2] * wx * wy * (1.0-wz) +
src[xmin + ymin*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * wz +
src[xmax + ymin*NX/2 + zmax*NX/2*NY/2] * wx * (1.0-wy) * wz +
src[xmin + ymax*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * wy * wz +
src[xmax + ymax*NX/2 + zmax*NX/2*NY/2] * wx * wy * wz;
dest[tid] = 2*dest[tid];
}
}
void compute(float *d_im_move, float *d_im_static, float *d_mv_x, float *d_mv_y, float *d_mv_z, int maxIter)
// d_mv_x, d_mv_y and d_im_move are updated
{
// bind moving image to texture
const cudaExtent volumeSize = make_cudaExtent(NX, NY, NZ);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cutilSafeCall( cudaMalloc3DArray(&d_im_move_array, &channelDesc, volumeSize) );
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_im_move_array;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams) );
d_im_move_tex.normalized = false;
d_im_move_tex.filterMode = cudaFilterModeLinear;
cutilSafeCall(cudaBindTextureToArray(d_im_move_tex, d_im_move_array, channelDesc));
// bind vector flows to texture
cutilSafeCall( cudaMalloc3DArray(&d_mv_x_array, &channelDesc, volumeSize) );
cudaMemcpy3DParms copyParams_x = {0};
copyParams_x.srcPtr = make_cudaPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_x.dstArray = d_mv_x_array;
copyParams_x.extent = volumeSize;
copyParams_x.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams_x) );
d_mv_x_tex.normalized = false;
d_mv_x_tex.filterMode = cudaFilterModeLinear;
cutilSafeCall( cudaMalloc3DArray(&d_mv_y_array, &channelDesc, volumeSize) );
cudaMemcpy3DParms copyParams_y = {0};
copyParams_y.srcPtr = make_cudaPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_y.dstArray = d_mv_y_array;
copyParams_y.extent = volumeSize;
copyParams_y.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams_y) );
d_mv_y_tex.normalized = false;
d_mv_y_tex.filterMode = cudaFilterModeLinear;
cutilSafeCall( cudaMalloc3DArray(&d_mv_z_array, &channelDesc, volumeSize) );
cudaMemcpy3DParms copyParams_z = {0};
copyParams_z.srcPtr = make_cudaPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_z.dstArray = d_mv_z_array;
copyParams_z.extent = volumeSize;
copyParams_z.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams_z) );
d_mv_z_tex.normalized = false;
d_mv_z_tex.filterMode = cudaFilterModeLinear;
float *d_im_out;
cutilSafeCall( cudaMalloc((void **)&d_im_out, sDATA_SIZE) );
// velocity
float *d_v_x, *d_v_x_copy;
float *d_v_y, *d_v_y_copy;
float *d_v_z, *d_v_z_copy;
cutilSafeCall( cudaMalloc((void **)&d_v_x, sDATA_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_v_y, sDATA_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_v_z, sDATA_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_v_x_copy, sDATA_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_v_y_copy, sDATA_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_v_z_copy, sDATA_SIZE) );
// setup for computing joint histogram via thrust
// the grid data structure keeps a range per grid bucket:
// each bucket_begin[i] indexes the first element of bucket i's list of points
// each bucket_end[i] indexes one past the last element of bucket i's list of points
thrust::device_vector<unsigned int> bucket_begin(nBin*nBin);
thrust::device_vector<unsigned int> bucket_end(nBin*nBin);
// allocate storage for each point's bucket index
thrust::device_vector<unsigned int> bucket_indices(NX*NY*NZ);
// allocate space to hold per-bucket sizes
thrust::device_vector<unsigned int> bucket_sizes(nBin*nBin);
// allocate float2 vector
float2 *d_points;
cudaMalloc((void**) &d_points, sizeof(float2)*NX*NY*NZ);
int regrid = 0;
float MI[1000];
int3 Dims;
Dims.x = NX;
Dims.y = NY;
Dims.z = NZ;
for(int it=0; it<maxIter; it++)
{
// update image
ImageWarp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_im_out, NX, NY, NZ);
// joint histogram via thrust ----- begin
// convert to float2 vector
transToFloat2<<<nblocks, NTHREAD_PER_BLOCK>>>(d_im_out, d_im_static, d_points, NX*NY*NZ);
// use a thrust ptr to wrap the raw pointer
thrust::device_ptr<float2> points_t(d_points);
// transform the points to their bucket indices
thrust::transform(points_t, points_t+NX*NY*NZ, bucket_indices.begin(), point_to_bucket_index(nBin,nBin));
// sort the bucket index
thrust::sort(bucket_indices.begin(), bucket_indices.end());
// find the beginning of each bucket's list of points
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(bucket_indices.begin(), bucket_indices.end(), search_begin,
search_begin + nBin*nBin, bucket_begin.begin());
// find the end of each bucket's list of points
thrust::upper_bound(bucket_indices.begin(), bucket_indices.end(), search_begin,
search_begin + nBin*nBin, bucket_end.begin());
// take the difference between bounds to find each bucket size
thrust::transform(bucket_end.begin(), bucket_end.end(), bucket_begin.begin(),
bucket_sizes.begin(), thrust :: minus<unsigned int>());
// now hist contains the histogram
unsigned int *hist = thrust::raw_pointer_cast(&bucket_sizes[0]);
copyHist<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(hist, d_jointHistogram);
// joint histogram via thrust ----- end
// compute the convolution of joint histogram
myconv2dGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram, d_jointHistogram_conv, GaussKernelH, nBin, nBin, 3*hValue);
// normalize joint histogram
float sum = cublasSasum (nBin*nBin, d_jointHistogram_conv , 1);
cublasSscal (nBin*nBin, 1.0f/sum, d_jointHistogram_conv, 1);
// compute mutual info by GPU
marginalDist<<<nBin, nBin>>>(d_jointHistogram_conv, d_probx, d_proby);
switch (METHOD)
{
case 1:
marginalBnorm_sum<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram);
marginalDistAlongY<<<nBin, nBin>>>(d_jointHistogram, d_Bsum);
BnormGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram_conv, d_probx, d_proby,d_Bsum, d_jointHistogram);
break;
case 2:
mutualInfoGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram);
break;
}
MI[it] = cublasSasum (nBin*nBin, d_jointHistogram_conv, 1);
printf("mutual information (%d)= %f\n", it, MI[it]);
// NOTE: after this step, jointHistogram becomes the likelihood
// compute the first derivative w.r.t. x-dim of joint histogram
myconv2dGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram, d_jointHistogram_conv, GaussKernelHx, nBin, nBin,3*hValue);
// compute the force
forceComp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_im_out, d_im_static, d_jointHistogram_conv, d_v_x, d_v_y, d_v_z, NX, NY, NZ);
ImageSmooth(d_v_x, d_v_x_copy,Dims);
ImageSmooth(d_v_y, d_v_y_copy,Dims);
ImageSmooth(d_v_z, d_v_z_copy,Dims);
flowComp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy, d_v_x, d_v_y, NX, NY, NZ);
// NOTE: d_v_x is Jacobian, d_v_y is the max flow
// d_v_x_copy, d_v_y_copy, d_v_z_copy are the displacement
thrust :: device_ptr<float> data_ptr(d_v_y);
int maxInd = cublasIsamax(NX*NY*NZ, d_v_y, 1) -1;
float maxflow = data_ptr[maxInd];
float dt = (du/maxflow); // > 1) ? 1 : du/maxflow;
printf("dt = %f \n", dt);
flowUpdate<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy,dt, NX, NY, NZ);
// regridding if Jacobian < threshJaco
sum = cublasSasum(NX*NY*NZ, d_v_x, 1);
if (sum>0.5)
{
regrid ++;
printf("regrid = %d\n", regrid);
// save d_im_move to be d_im_out
cudaUnbindTexture(d_im_move_tex);
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)d_im_out, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_im_move_array;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams) );
cutilSafeCall(cudaBindTextureToArray(d_im_move_tex, d_im_move_array));
// update vector flow
ImageWarp_mv<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, NX, NY, NZ);
cudaMemcpy3DParms copyParams_x = {0};
copyParams_x.srcPtr = make_cudaPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_x.dstArray = d_mv_x_array;
copyParams_x.extent = volumeSize;
copyParams_x.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams_x) );
cutilSafeCall(cudaBindTextureToArray(d_mv_x_tex, d_mv_x_array));
cudaMemcpy3DParms copyParams_y = {0};
copyParams_y.srcPtr = make_cudaPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_y.dstArray = d_mv_y_array;
copyParams_y.extent = volumeSize;
copyParams_y.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams_y) );
cutilSafeCall(cudaBindTextureToArray(d_mv_y_tex, d_mv_y_array));
cudaMemcpy3DParms copyParams_z = {0};
copyParams_z.srcPtr = make_cudaPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams_z.dstArray = d_mv_z_array;
copyParams_z.extent = volumeSize;
copyParams_z.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams_z) );
cutilSafeCall(cudaBindTextureToArray(d_mv_z_tex, d_mv_z_array));
cutilSafeCall( cudaMemset(d_mv_x, 0, sDATA_SIZE) );
cutilSafeCall( cudaMemset(d_mv_y, 0, sDATA_SIZE) );
cutilSafeCall( cudaMemset(d_mv_z, 0, sDATA_SIZE) );
} // end for regridding
} // for-loop iteration
if (!regrid)
{
ImageWarp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_im_move, NX, NY, NZ);
}
else
{
cudaMemcpy3DParms copyParams = {0};
cudaUnbindTexture(d_im_move_tex);
copyParams.srcPtr = make_cudaPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_im_move_array;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyDeviceToDevice;
cutilSafeCall( cudaMemcpy3D(&copyParams) );
cutilSafeCall(cudaBindTextureToArray(d_im_move_tex, d_im_move_array));
ImageWarp_final<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z,d_im_move, NX, NY, NZ);
}
cudaFree(d_points);
cudaFree(d_v_x);
cudaFree(d_v_y);
cudaFree(d_v_z);
cudaFree(d_v_x_copy);
cudaFree(d_v_y_copy);
cudaFree(d_v_z_copy);
cudaUnbindTexture(d_im_move_tex);
cudaFreeArray(d_im_move_array);
cudaUnbindTexture(d_mv_x_tex);
cudaFreeArray(d_mv_x_array);
cudaUnbindTexture(d_mv_y_tex);
cudaFreeArray(d_mv_y_array);
cudaUnbindTexture(d_mv_z_tex);
cudaFreeArray(d_mv_z_array);
cudaFree(d_im_out);
}
__global__ void transToFloat2(const float *input1, const float *input2, float2 *output, const int n)
{
const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x;
// obtain current id on thread
if (tid < n)
{
output[tid] = make_float2(input1[tid], input2[tid]);
}
}
#endif
|
03_opengl_ripple.hip | // !!! This is a file automatically generated by hipify!!!
/**
* An example showing CUDA & OpenGL interoperability heavily copied from
* CUDA By Example by Jason Sanders and Edward Kandrot
*
*
* There is quite a bit of OpenGL overhead, but the basic idea is that
* OpenGL needs to manage the memory on the GPU, and CUDA just gets
* a pointer to that memory to manipulate.
* The CUDA kernel code is called on each frame draw
*
*
* Besides the OpenGL stuff, this code also demonstrates using more than 1D
* gridDim and blockDim in the kernel launch parameters.
* Dimension logic is shown to convert from CUDA thread dimensions to a
* 2D picture pixel position, and then to a 1D buffer index.
*
* Danny George 2012
*/
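/*
 * Editor's note (illustrative sketch, not part of the original example):
 * with DIM = 512 and the 16x16 thread blocks launched below, gridDim = (32, 32), so
 *   blockIdx = (3, 2), threadIdx = (5, 7) gives
 *   x = 5 + 3*16 = 53,  y = 7 + 2*16 = 39,
 *   offset = x + y * blockDim.x * gridDim.x = 53 + 39*512 = 20021,
 * i.e. each thread owns exactly one uchar4 pixel of the 512x512 buffer.
 */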
#define GL_GLEXT_PROTOTYPES
#include <stdio.h>
#include "GL/glut.h"
#include "hip/hip_runtime.h"
#include "cuda_gl_interop.h"
#define DIM 512 // keep as power of 2 above 16
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
GLuint bufferObj;
cudaGraphicsResource *resource;
// based on ripple code, but uses uchar4 which is the type of data graphic interop uses
__global__ void kernel(uchar4 *ptr, int ticks)
{
// map from threadIdx / blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
// map from pixel position to buffer index
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf(fx * fx + fy * fy);
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) / (d/10.0f + 1.0f));
ptr[offset].x = grey; // R
ptr[offset].y = grey; // G
ptr[offset].z = grey; // B
ptr[offset].w = 255; // A
}
static void draw_func(void)
{
static int ticks = 1;
// create a devPtr that we can pass to our CUDA kernels
uchar4 * devPtr;
size_t size;
HANDLE_ERROR( hipGraphicsMapResources(1, &resource, NULL) );
HANDLE_ERROR( hipGraphicsResourceGetMappedPointer((void **)&devPtr, &size, resource) );
dim3 grids(DIM/16, DIM/16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, devPtr, ticks++);
HANDLE_ERROR( hipGraphicsUnmapResources(1, &resource, NULL) );
// pixel buffer is already bound (GL_PIXEL_UNPACK_BUFFER_ARB)
glDrawPixels(DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
static void key_func(unsigned char key, int x, int y)
{
switch (key) {
case 27: // ESC
// clean up OpenGL and CUDA
HANDLE_ERROR( hipGraphicsUnregisterResource(resource) );
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glDeleteBuffers(1, &bufferObj);
exit(0);
}
}
int main(int argc, char *argv[])
{
hipDeviceProp_t prop;
int dev;
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.major = 1;
prop.minor = 0;
// grab a CUDA device >= 1.0
// we need the device number to tell CUDA runtime we
// intend to run CUDA & OpenGL on it
HANDLE_ERROR( hipChooseDevice(&dev, &prop) );
HANDLE_ERROR( hipGLSetGLDevice(dev) );
// initialize GLUT
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(DIM, DIM);
glutCreateWindow(argv[0]);
// creating a pixel buffer object (pbo)
glGenBuffers(1, &bufferObj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4,
NULL, GL_DYNAMIC_DRAW_ARB);
// register bufferObj with CUDA runtime as a graphics resource
HANDLE_ERROR( hipGraphicsGLRegisterBuffer(&resource, bufferObj, hipGraphicsMapFlagsNone) );
// setup GLUT and kick off main loop
glutKeyboardFunc(key_func);
glutDisplayFunc(draw_func);
glutIdleFunc(draw_func);
glutMainLoop();
return 0;
}
| 03_opengl_ripple.cu | /**
* An example showing CUDA & OpenGL interoperability heavily copied from
* CUDA By Example by Jason Sanders and Edward Kandrot
*
*
* There is quite a bit of OpenGL overhead, but the basic idea is that
* OpenGL needs to manage the memory on the GPU, and CUDA just gets
* a pointer to that memory to manipulate.
* The CUDA kernel code is called on each frame draw
*
*
* Besides the OpenGL stuff, this code also demonstrates using more than 1D
* gridDim and blockDim in the kernel launch parameters.
* Dimension logic is shown to convert from CUDA thread dimensions to a
* 2D picture pixel position, and then to a 1D buffer index.
*
* Danny George 2012
*/
#define GL_GLEXT_PROTOTYPES
#include <stdio.h>
#include "GL/glut.h"
#include "cuda.h"
#include "cuda_gl_interop.h"
#define DIM 512 // keep as power of 2 above 16
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
GLuint bufferObj;
cudaGraphicsResource *resource;
// based on ripple code, but uses uchar4 which is the type of data graphic interop uses
__global__ void kernel(uchar4 *ptr, int ticks)
{
// map from threadIdx / blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
// map from pixel position to buffer index
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf(fx * fx + fy * fy);
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) / (d/10.0f + 1.0f));
ptr[offset].x = grey; // R
ptr[offset].y = grey; // G
ptr[offset].z = grey; // B
ptr[offset].w = 255; // A
}
static void draw_func(void)
{
static int ticks = 1;
// create a devPtr that we can pass to our CUDA kernels
uchar4 * devPtr;
size_t size;
HANDLE_ERROR( cudaGraphicsMapResources(1, &resource, NULL) );
HANDLE_ERROR( cudaGraphicsResourceGetMappedPointer((void **)&devPtr, &size, resource) );
dim3 grids(DIM/16, DIM/16);
dim3 threads(16, 16);
kernel<<<grids, threads>>>(devPtr, ticks++);
HANDLE_ERROR( cudaGraphicsUnmapResources(1, &resource, NULL) );
// pixel buffer is already bound (GL_PIXEL_UNPACK_BUFFER_ARB)
glDrawPixels(DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
static void key_func(unsigned char key, int x, int y)
{
switch (key) {
case 27: // ESC
// clean up OpenGL and CUDA
HANDLE_ERROR( cudaGraphicsUnregisterResource(resource) );
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glDeleteBuffers(1, &bufferObj);
exit(0);
}
}
int main(int argc, char *argv[])
{
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1;
prop.minor = 0;
// grab a CUDA device >= 1.0
// we need the device number to tell CUDA runtime we
// intend to run CUDA & OpenGL on it
HANDLE_ERROR( cudaChooseDevice(&dev, &prop) );
HANDLE_ERROR( cudaGLSetGLDevice(dev) );
// initialize GLUT
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(DIM, DIM);
glutCreateWindow(argv[0]);
// creating a pixel buffer object (pbo)
glGenBuffers(1, &bufferObj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4,
NULL, GL_DYNAMIC_DRAW_ARB);
// register bufferObj with CUDA runtime as a graphics resource
HANDLE_ERROR( cudaGraphicsGLRegisterBuffer(&resource, bufferObj, cudaGraphicsMapFlagsNone) );
// setup GLUT and kick off main loop
glutKeyboardFunc(key_func);
glutDisplayFunc(draw_func);
glutIdleFunc(draw_func);
glutMainLoop();
return 0;
}
|
0bca95bc67bc9cba2f611a02adc3f830d6911ec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
__global__ void
magma_zselect_insert_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *rowMT,
magma_index_t *colMT,
magmaDoubleComplex *valMT,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
magma_index_t select = selection[j];
// return if no match for this thread block
if( select != p ){
return;
}
magma_index_t count = sizes[j];
if( i<count ){
colMT[ rowMT[j]+i ] = col[ row[j]+i ];
valMT[ rowMT[j]+i ] = val[ row[j]+i ];
}
}// kernel
__global__ void
magma_zselect_rowptr_kernel(
magma_int_t n,
magma_index_t *sizes,
magma_index_t *rowMT )
{
// unfortunately sequential...
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i == 0 ){
magma_index_t count = 0;
rowMT[0] = 0;
magma_index_t j=0;
for( j=0; j<n; j++ ){
count = count+sizes[j];
rowMT[j+1] = count;
}
}
}// kernel
__global__ void
magma_zselect_pattern_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
magma_index_t diff = row[i+1] - row[i];
if( diff <= WRP ){
selection[ i ] = p;
sizes[i] = diff;
}
}
}// kernel
/**
Purpose
-------
This routine maximizes the pattern for the ISAI preconditioner. Precisely,
it computes L, L^2, L^3, L^4, L^5 and then selects the columns of M_L
such that the nonzeros per column do not exceed the
implementation-specific limit (32).
The input is the original matrix (row-major).
The output is already col-major.
Arguments
---------
@param[in,out]
L magma_z_matrix
Incomplete factor.
@param[in,out]
MT magma_z_matrix*
SPAI preconditioner structure, CSR col-major.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zgeisai_maxblock(
magma_z_matrix L,
magma_z_matrix *MT,
magma_queue_t queue )
{
magma_int_t info = 0;
int bs1 = 512;
int bs2 = 1;
int bs3 = 1;
int gs1 = magma_ceildiv( L.num_rows, bs1 );
int gs2 = 1;
int gs3 = 1;
dim3 block( bs1, bs2, bs3 );
dim3 grid( gs1,gs2,gs3 );
dim3 block0( 1, 1, 1 );
dim3 grid0( 1, 1, 1 );
int blocksize1 = WARP_SIZE;
int blocksize2 = 1;
int dimgrid1 = min( int( sqrt( double( L.num_rows ))), 65535 );
int dimgrid2 = min(magma_ceildiv( L.num_rows, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( L.num_rows, dimgrid1*dimgrid2 );
dim3 block2( blocksize1, blocksize2, 1 );
dim3 grid2( dimgrid1, dimgrid2, dimgrid3 );
magma_z_matrix L2={Magma_CSR}, L3={Magma_CSR},
L4={Magma_CSR}, L5={Magma_CSR}, T={Magma_CSR};
magma_index_t *selections_d = NULL, *sizes_d = NULL;
CHECK( magma_index_malloc( &selections_d, L.num_rows ) );
CHECK( magma_index_malloc( &sizes_d, L.num_rows ) );
magma_int_t nonzeros;
// generate all pattern that may be considered
// pattern L
CHECK( magma_z_mtransfer( L, &T, Magma_DEV, Magma_DEV, queue ) );
// pattern L^2
CHECK( magma_z_spmm( MAGMA_Z_ONE, L, T, &L2, queue ) );
// pattern L^3
CHECK( magma_z_spmm( MAGMA_Z_ONE, T, L2, &L3, queue ) );
// pattern L^4
CHECK( magma_z_spmm( MAGMA_Z_ONE, T, L3, &L4, queue ) );
// pattern L^5
CHECK( magma_z_spmm( MAGMA_Z_ONE, T, L4, &L5, queue ) );
// check for pattern L
hipLaunchKernelGGL(( magma_zselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 1, L.drow, selections_d, sizes_d );
// check for pattern L2
hipLaunchKernelGGL(( magma_zselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 2, L2.drow, selections_d, sizes_d );
// check for pattern L3
hipLaunchKernelGGL(( magma_zselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 3, L3.drow, selections_d, sizes_d );
// check for pattern L4
hipLaunchKernelGGL(( magma_zselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 4, L4.drow, selections_d, sizes_d );
// check for pattern L5
hipLaunchKernelGGL(( magma_zselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 5, L5.drow, selections_d, sizes_d );
//now allocate the rowptr for MT
CHECK( magma_index_malloc( &MT->drow, L.num_rows+1 ) );
// global nonzero count + generate rowptr
hipLaunchKernelGGL(( magma_zselect_rowptr_kernel), dim3(grid0), dim3(block0), 0, queue->cuda_stream() ,
L.num_rows, sizes_d, MT->drow );
hipMemcpy( &nonzeros, MT->drow+L.num_rows, sizeof(magma_index_t), hipMemcpyDeviceToHost);
//now allocate the memory needed
CHECK( magma_index_malloc( &MT->dcol, nonzeros ) );
CHECK( magma_zmalloc( &MT->dval, nonzeros ) );
// fill in some info
MT->memory_location = Magma_DEV;
MT->storage_type = Magma_CSR;
MT->num_rows = L.num_rows;
MT->num_cols = L.num_cols;
MT->nnz = nonzeros;
MT->true_nnz = nonzeros;
MT->fill_mode = T.fill_mode;
// now insert the data needed
hipLaunchKernelGGL(( magma_zselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 1,
L.drow, L.dcol, L.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_zselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 2,
L2.drow, L2.dcol, L2.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_zselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 3,
L3.drow, L3.dcol, L3.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_zselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 4,
L4.drow, L4.dcol, L4.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_zselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 5,
L5.drow, L5.dcol, L5.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
cleanup:
magma_free( sizes_d );
magma_free( selections_d );
magma_zmfree( &T, queue );
magma_zmfree( &L2, queue );
magma_zmfree( &L3, queue );
magma_zmfree( &L4, queue );
magma_zmfree( &L5, queue );
return info;
}
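// Editor's note: illustrative call sequence (a sketch, not part of the original file);
// it assumes L already holds the incomplete factor on the device and that a valid
// magma_queue_t `queue` exists:
//   magma_z_matrix MT = {Magma_CSR};
//   magma_int_t info = magma_zgeisai_maxblock( L, &MT, queue );
//   // on success, MT is the col-major pattern with at most 32 nonzeros per column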
| 0bca95bc67bc9cba2f611a02adc3f830d6911ec2.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
__global__ void
magma_zselect_insert_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *rowMT,
magma_index_t *colMT,
magmaDoubleComplex *valMT,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
magma_index_t select = selection[j];
// return if no match for this thread block
if( select != p ){
return;
}
magma_index_t count = sizes[j];
if( i<count ){
colMT[ rowMT[j]+i ] = col[ row[j]+i ];
valMT[ rowMT[j]+i ] = val[ row[j]+i ];
}
}// kernel
__global__ void
magma_zselect_rowptr_kernel(
magma_int_t n,
magma_index_t *sizes,
magma_index_t *rowMT )
{
// unfortunately sequential...
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i == 0 ){
magma_index_t count = 0;
rowMT[0] = 0;
magma_index_t j=0;
for( j=0; j<n; j++ ){
count = count+sizes[j];
rowMT[j+1] = count;
}
}
}// kernel
__global__ void
magma_zselect_pattern_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
magma_index_t diff = row[i+1] - row[i];
if( diff <= WRP ){
selection[ i ] = p;
sizes[i] = diff;
}
}
}// kernel
/**
Purpose
-------
This routine maximizes the pattern for the ISAI preconditioner. Precisely,
it computes L, L^2, L^3, L^4, L^5 and then selects the columns of M_L
such that the nonzeros per column do not exceed the
implementation-specific limit (32).
The input is the original matrix (row-major).
The output is already col-major.
Arguments
---------
@param[in,out]
L magma_z_matrix
Incomplete factor.
@param[in,out]
MT magma_z_matrix*
SPAI preconditioner structure, CSR col-major.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zgeisai_maxblock(
magma_z_matrix L,
magma_z_matrix *MT,
magma_queue_t queue )
{
magma_int_t info = 0;
int bs1 = 512;
int bs2 = 1;
int bs3 = 1;
int gs1 = magma_ceildiv( L.num_rows, bs1 );
int gs2 = 1;
int gs3 = 1;
dim3 block( bs1, bs2, bs3 );
dim3 grid( gs1,gs2,gs3 );
dim3 block0( 1, 1, 1 );
dim3 grid0( 1, 1, 1 );
int blocksize1 = WARP_SIZE;
int blocksize2 = 1;
int dimgrid1 = min( int( sqrt( double( L.num_rows ))), 65535 );
int dimgrid2 = min(magma_ceildiv( L.num_rows, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( L.num_rows, dimgrid1*dimgrid2 );
dim3 block2( blocksize1, blocksize2, 1 );
dim3 grid2( dimgrid1, dimgrid2, dimgrid3 );
magma_z_matrix L2={Magma_CSR}, L3={Magma_CSR},
L4={Magma_CSR}, L5={Magma_CSR}, T={Magma_CSR};
magma_index_t *selections_d = NULL, *sizes_d = NULL;
CHECK( magma_index_malloc( &selections_d, L.num_rows ) );
CHECK( magma_index_malloc( &sizes_d, L.num_rows ) );
magma_int_t nonzeros;
// generate all pattern that may be considered
// pattern L
CHECK( magma_z_mtransfer( L, &T, Magma_DEV, Magma_DEV, queue ) );
// pattern L^2
CHECK( magma_z_spmm( MAGMA_Z_ONE, L, T, &L2, queue ) );
// pattern L^3
CHECK( magma_z_spmm( MAGMA_Z_ONE, T, L2, &L3, queue ) );
// pattern L^4
CHECK( magma_z_spmm( MAGMA_Z_ONE, T, L3, &L4, queue ) );
// pattern L^5
CHECK( magma_z_spmm( MAGMA_Z_ONE, T, L4, &L5, queue ) );
// check for pattern L
magma_zselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 1, L.drow, selections_d, sizes_d );
// check for pattern L2
magma_zselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 2, L2.drow, selections_d, sizes_d );
// check for pattern L3
magma_zselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 3, L3.drow, selections_d, sizes_d );
// check for pattern L4
magma_zselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 4, L4.drow, selections_d, sizes_d );
// check for pattern L5
magma_zselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 5, L5.drow, selections_d, sizes_d );
//now allocate the rowptr for MT
CHECK( magma_index_malloc( &MT->drow, L.num_rows+1 ) );
// global nonzero count + generate rowptr
magma_zselect_rowptr_kernel<<< grid0, block0, 0, queue->cuda_stream() >>>
( L.num_rows, sizes_d, MT->drow );
cudaMemcpy( &nonzeros, MT->drow+L.num_rows, sizeof(magma_index_t), cudaMemcpyDeviceToHost);
//now allocate the memory needed
CHECK( magma_index_malloc( &MT->dcol, nonzeros ) );
CHECK( magma_zmalloc( &MT->dval, nonzeros ) );
// fill in some info
MT->memory_location = Magma_DEV;
MT->storage_type = Magma_CSR;
MT->num_rows = L.num_rows;
MT->num_cols = L.num_cols;
MT->nnz = nonzeros;
MT->true_nnz = nonzeros;
MT->fill_mode = T.fill_mode;
// now insert the data needed
magma_zselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 1,
L.drow, L.dcol, L.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_zselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 2,
L2.drow, L2.dcol, L2.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_zselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 3,
L3.drow, L3.dcol, L3.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_zselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 4,
L4.drow, L4.dcol, L4.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_zselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 5,
L5.drow, L5.dcol, L5.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
cleanup:
magma_free( sizes_d );
magma_free( selections_d );
magma_zmfree( &T, queue );
magma_zmfree( &L2, queue );
magma_zmfree( &L3, queue );
magma_zmfree( &L4, queue );
magma_zmfree( &L5, queue );
return info;
}
|
ab7d0b66260dcd77877d310dcfd45dfdf98f247e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
void initVector(double *u, int n, double c) {
int i;
for (i=0; i<n; i++)
u[i] = c;
}
__global__ void gpuVectAdd(double *u, double *v, double *z, int N)
{
// define index
int i = blockIdx.x * blockDim.x + threadIdx.x;
// check that the thread is not out of the vector boundary
if (i >= N ) return;
int index = i;
// write the operation for the sum of vectors
z[index] = u[index] + v[index];
}
int main(int argc, char *argv[]) {
// size of vectors
const int N = 1000;
// allocate memory on host
double * u = (double *) malloc(N * sizeof(double));
double * v = (double *) malloc(N * sizeof(double));
double * z = (double *) malloc(N * sizeof(double));
initVector((double *) u, N, 1.0);
initVector((double *) v, N, 2.0);
initVector((double *) z, N, 0.0);
// allocate memory on device
double *u_dev, *v_dev, *z_dev;
hipMalloc((void **) &u_dev, N*sizeof(double));
hipMalloc((void **) &v_dev, N*sizeof(double));
hipMalloc((void **) &z_dev, N*sizeof(double));
// copy data from host to device
hipMemcpy(u_dev, u, N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(v_dev, v, N*sizeof(double), hipMemcpyHostToDevice);
dim3 block(32);
dim3 grid((N-1)/block.x + 1);
// define the execution configuration
hipLaunchKernelGGL(( gpuVectAdd), dim3(grid), dim3(block), 0, 0, u_dev, v_dev, z_dev, N);
// copy data from device to host
hipMemcpy(z, z_dev, N*sizeof(double), hipMemcpyDeviceToHost);
printf("%f %f %f\n", z[0], z[1], z[1]);
// free resources on device
hipFree(u_dev);
hipFree(v_dev);
hipFree(z_dev);
// free resources on host
free(u);
free(v);
free(z);
return 0;
}
| ab7d0b66260dcd77877d310dcfd45dfdf98f247e.cu | #include <stdio.h>
#include <stdlib.h>
void initVector(double *u, int n, double c) {
int i;
for (i=0; i<n; i++)
u[i] = c;
}
__global__ void gpuVectAdd(double *u, double *v, double *z, int N)
{
// define index
int i = blockIdx.x * blockDim.x + threadIdx.x;
// check that the thread is not out of the vector boundary
if (i >= N ) return;
int index = i;
// write the operation for the sum of vectors
z[index] = u[index] + v[index];
}
int main(int argc, char *argv[]) {
// size of vectors
const int N = 1000;
// allocate memory on host
double * u = (double *) malloc(N * sizeof(double));
double * v = (double *) malloc(N * sizeof(double));
double * z = (double *) malloc(N * sizeof(double));
initVector((double *) u, N, 1.0);
initVector((double *) v, N, 2.0);
initVector((double *) z, N, 0.0);
// allocate memory on device
double *u_dev, *v_dev, *z_dev;
cudaMalloc((void **) &u_dev, N*sizeof(double));
cudaMalloc((void **) &v_dev, N*sizeof(double));
cudaMalloc((void **) &z_dev, N*sizeof(double));
// copy data from host to device
cudaMemcpy(u_dev, u, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(v_dev, v, N*sizeof(double), cudaMemcpyHostToDevice);
dim3 block(32);
dim3 grid((N-1)/block.x + 1);
// define the execution configuration
gpuVectAdd<<<grid, block>>>(u_dev, v_dev, z_dev, N);
// copy data from device to host
cudaMemcpy(z, z_dev, N*sizeof(double), cudaMemcpyDeviceToHost);
printf("%f %f %f\n", z[0], z[1], z[1]);
// free resources on device
cudaFree(u_dev);
cudaFree(v_dev);
cudaFree(z_dev);
// free resources on host
free(u);
free(v);
free(z);
return 0;
}
|
Constant_Subtract_1D_Array_Kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
* Copyright © 2011-2012,
* Marwan Abdellah: <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
/*!
* CUDA : This kernel subtracts a constant value from the input vector and
* writes the result into the output vector of length N.
*
* @param devArrayInput
* Input vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Resulting vector.
*
* @param N
* Vector length.
*
* @author
* Marwan Abdellah <[email protected]>
*
* @date
* Created: August, 2012.
* @date
* Last Update: September, 2012.
*
* @note
* Minimum CUDA version 3.2.
* @note
* Minimum Device Compute Capability 1.0.
*/
template <typename T>
__global__
void Constant_Subtract_1D_Array_Kernel(T* devArrayInput,
T constVal,
T* devArrayOutput,
int N)
{
int xThreadIdx = threadIdx.x;
int blockWidth = blockDim.x;
int index = blockIdx.x * blockWidth + xThreadIdx;
#ifdef VEC_CHECK
if (index < N)
devArrayOutput[index] = (T) ((T) devArrayInput[index] - (T) constVal);
#else
devArrayOutput[index] = (T) ((T) devArrayInput[index] - (T) constVal);
#endif
}
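// Editor's note: an illustrative host-side launch sketch (not part of the original
// file). The 256-thread block size is an assumption; compile with -DVEC_CHECK so the
// kernel's bounds check guards the final partial block.
template <typename T>
void Constant_Subtract_1D_Array(T* devArrayInput, T constVal, T* devArrayOutput, int N)
{
    const int blockSize = 256;                              // assumed block size
    const int gridSize = (N + blockSize - 1) / blockSize;   // cover all N elements
    hipLaunchKernelGGL(HIP_KERNEL_NAME(Constant_Subtract_1D_Array_Kernel<T>),
                       dim3(gridSize), dim3(blockSize), 0, 0,
                       devArrayInput, constVal, devArrayOutput, N);
}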
| Constant_Subtract_1D_Array_Kernel.cu | /*********************************************************************
* Copyright © 2011-2012,
* Marwan Abdellah: <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
/*!
* CUDA : This kernel subtracts a constant value from the input vector and
* writes the result into the output vector of length N.
*
* @param devArrayInput
* Input vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Resulting vector.
*
* @param N
* Vector length.
*
* @author
* Marwan Abdellah <[email protected]>
*
* @date
* Created: August, 2012.
* @date
* Last Update: September, 2012.
*
* @note
* Minimum CUDA version 3.2.
* @note
* Minimum Device Compute Capability 1.0.
*/
template <typename T>
__global__
void Constant_Subtract_1D_Array_Kernel(T* devArrayInput,
T constVal,
T* devArrayOutput,
int N)
{
int xThreadIdx = threadIdx.x;
int blockWidth = blockDim.x;
int index = blockIdx.x * blockWidth + xThreadIdx;
#ifdef VEC_CHECK
if (index < N)
devArrayOutput[index] = (T) ((T) devArrayInput[index] - (T) constVal);
#else
devArrayOutput[index] = (T) ((T) devArrayInput[index] - (T) constVal);
#endif
}
|
9efcfb2f598c1d28afd129e8e77547d07fd39869.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
//Include Stream Compaction files
#include "stream_compaction\efficient.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// =============================================================================
// TIMER FUNCTIONS
// =============================================================================
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t time_start_cpu;
time_point_t time_end_cpu;
bool cpu_timer_started = false;
float prev_elapsed_time_cpu_milliseconds = 0.f;
void startCpuTimer()
{
if (cpu_timer_started) { throw std::runtime_error("CPU timer already started"); }
cpu_timer_started = true;
time_start_cpu = std::chrono::high_resolution_clock::now();
}
void endCpuTimer()
{
time_end_cpu = std::chrono::high_resolution_clock::now();
if (!cpu_timer_started) { throw std::runtime_error("CPU timer not started"); }
std::chrono::duration<double, std::milli> duro = time_end_cpu - time_start_cpu;
prev_elapsed_time_cpu_milliseconds =
static_cast<decltype(prev_elapsed_time_cpu_milliseconds)>(duro.count());
cpu_timer_started = false;
}
void printCPUTimer(int iter)
{
cout << "Time (in ms): " << prev_elapsed_time_cpu_milliseconds << endl;
cout << "Iteration: " << iter << endl;
}
// =============================================================================
// PATH TRACE INIT AND FREE CPU FUNCTIONS
// =============================================================================
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
//PathSeg and Isect indices sorted by Material
static int * dev_PathSegIndices = NULL;
static int * dev_IsectIndices = NULL;
//Caching first bounce
static ShadeableIntersection * dev_IsectCached = NULL;
//Lights array for direct lighting
static Geom * dev_lights = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
//PathSeg and Isect indices sorted by Material
hipMalloc(&dev_PathSegIndices, pixelcount * sizeof(int));
hipMalloc(&dev_IsectIndices, pixelcount * sizeof(int));
//Caching first bounce
hipMalloc(&dev_IsectCached, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_IsectCached, 0, pixelcount * sizeof(ShadeableIntersection));
//Lights array for direct lighting
hipMalloc(&dev_lights, scene->lights.size() * sizeof(Geom));
hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
hipFree(dev_PathSegIndices);
hipFree(dev_IsectIndices);
hipFree(dev_IsectCached);
hipFree(dev_lights);
checkCUDAError("pathtraceFree");
}
// =============================================================================
// GENERATE RAY FROM CAMERA KERNEL FUNCTION
// =============================================================================
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y)
{
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
// Note: If antialiasing -- can NOT cache first bounce!
thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y);
thrust::uniform_real_distribution<float> u01(-1, 1);
float offset_x = u01(rng);
float offset_y = u01(rng);
if (ANTI_ALIASING)
{
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x + offset_x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y + offset_y - (float)cam.resolution.y * 0.5f)
);
}
else
{
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
//Depth of field
//Generate sample point on disk lens, shoot ray through that
if (cam.lensRadius > 0.0f)
{
thrust::uniform_real_distribution<float> u02(0, 1);
glm::vec2 sample = glm::vec2(u02(rng), u02(rng));
glm::vec3 pLens = cam.lensRadius * squareToDiskConocentric(sample);
glm::vec3 pFocus = cam.focalDistance * segment.ray.direction + segment.ray.origin;
glm::vec3 aperaturePt = segment.ray.origin + (cam.up * pLens[1]) + (cam.right * pLens[0]);
segment.ray.origin = aperaturePt;
segment.ray.direction = glm::normalize(pFocus - aperaturePt);
}//end DOF check
}//end if
}
// =============================================================================
// COMPUTE INTERSECTION KERNEL FUNCTION
// =============================================================================
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}//end for all geoms
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal; //QUESTION: should this be normalized?
//intersections[path_index].surfaceNormal = glm::normalize(normal);
intersections[path_index].intersectionPt = intersect_point;
}
}
}
// =============================================================================
// COMPUTE RAY INTERSECTION
// =============================================================================
__host__ __device__
void rayIntersect(
Ray ray
, Geom * geoms
, int geoms_size
, ShadeableIntersection &isect)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}//end for all geoms
if (hit_geom_index == -1)
{
isect.t = -1.0f;
}
else
{
//The ray hits something
isect.t = t_min;
isect.materialId = geoms[hit_geom_index].materialid;
isect.surfaceNormal = normal;
isect.intersectionPt = intersect_point;
}
}
// =============================================================================
// SHADER KERNEL FUNCTIONS + HELPER FUNCTIONS
// =============================================================================
__host__ __device__
glm::vec3 Le(const glm::vec3 &wo, const glm::vec3 &n, const Material &material)
{
//If isect is a light, calculate the emitted light
if (material.emittance > 0.0f)
{
//NOTE: to test if getting black, just return material.emittance
//If normal of object hit and ray are in same direction, return light's color
if (glm::dot(n, wo) > 0.0f)
{
return material.color * material.emittance;
}
else
{
return glm::vec3(0.0f);
}
}
else
{
return glm::vec3(0.0f);
}
}
/*
UNUSED
Get an intersection on the surface of the light
Check if resultant PDF is 0 or that ref
*/
__host__ __device__
glm::vec3 Sample_Li(const Geom &light, const ShadeableIntersection &isect, thrust::default_random_engine &rng, glm::vec3 *wi)
{
thrust::uniform_real_distribution<float> u01(0, 1);
glm::vec2 sample2D = glm::vec2(u01(rng), u01(rng));
//SAMPLE THE SHAPE
//glm::vec3 resultIsectPt = ;
//glm::vec3 resultIsectNormal = ;
//if(pdf < EPSILON) --> return black
//if (resultIsectPt == isect.intersectionPt)
//{
// return glm::vec3(0.0f);
//}
//*wi = glm::normalize(resultIsectPt - isect.intersectionPt);
//return Le(-*wi, , resultIsectNormal);
return glm::vec3(1.0f);
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces <= 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
// if the intersection exists...
if (intersection.t > 0.0f)
{
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
//Use depth for non-compact version to work properly
//https://groups.google.com/forum/#!topic/cis-565-fall-2017/thgdf2jzDyo
//thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
//thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f)
{
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else
{
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
scatterRay(pathSegments[idx], intersection.intersectionPt, intersection.surfaceNormal, material, rng);
pathSegments[idx].remainingBounces--;
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}//end if
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}//end else
}
}
// NAIVE
__global__ void shadeNaiveMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces <= 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
// if the intersection exists...
if (intersection.t > 0.0f)
{
thrust::default_random_engine rng;
if (STREAM_COMPACTION) rng = makeSeededRandomEngine(iter, idx, 0);
else rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f)
{
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
else
{
scatterRay(pathSegments[idx], intersection.intersectionPt, intersection.surfaceNormal, material, rng);
pathSegments[idx].remainingBounces--;
}
}//end if
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}//end else
}
}
// NAIVE AND DIRECT
__global__ void shadeNaiveAndDirectMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
, Geom * lights
, int numLights
, Geom * geoms
, int numGeoms
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces < 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
// if the intersection exists...
if (intersection.t > 0.0f)
{
thrust::default_random_engine rng;
if (STREAM_COMPACTION) rng = makeSeededRandomEngine(iter, idx, 0);
else rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f)
{
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
else
{
scatterRay(pathSegments[idx], intersection.intersectionPt, intersection.surfaceNormal, material, rng);
pathSegments[idx].remainingBounces--;
//DIRECT LIGHTING
if (pathSegments[idx].remainingBounces == 0)
{
//Select random light source from lights array ------------------------
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); //QUESTION: should this be 0 or depth?
thrust::uniform_real_distribution<float> u01(0, 1);
int randLightIdx = glm::min(
(int)glm::floor(u01(rng) * numLights),
(numLights - 1));
Geom currLight = lights[randLightIdx];
glm::vec3 ptOnLight;
glm::vec3 lightNormal;
if (currLight.type == SPHERE)
{
glm::vec2 sample(u01(rng), u01(rng));
ptOnLight = sampleSphere(sample, currLight, lightNormal);
}
else if (currLight.type == CUBE)
{
glm::vec3 sample(u01(rng), u01(rng), u01(rng));
ptOnLight = sampleCube(sample, currLight, lightNormal);
}
//Create shadow feeler ray ------------------------
glm::vec3 _dirToLight = ptOnLight - intersection.intersectionPt;
glm::vec3 rayDirToLight = glm::normalize(_dirToLight);
Ray rayToLight = spawnRay(intersection.intersectionPt, intersection.surfaceNormal, rayDirToLight);
//Get the intersection from spawning this new shadow feeler ray ------------------------
ShadeableIntersection shadowIsect;
rayIntersect(rayToLight, geoms, numGeoms, shadowIsect);
glm::vec3 visibility(1.0f);
if (shadowIsect.t > 0.0f)
{
visibility = ((glm::length(_dirToLight) - 0.1 > shadowIsect.t)) ? glm::vec3(0.0f) : glm::vec3(1.0f);
}
//Other LTE components ------------------------
Material lightMat = materials[currLight.materialid];
glm::vec3 sampleLiResult = lightMat.color * lightMat.emittance;
//glm::vec3 sampleLiResult = Le(rayDirToLight, lightNormal, lightMat);
glm::vec3 f = material.color; //if materials have more than 1 bxdf, need to implement function
float absDot = AbsDot(rayDirToLight, intersection.surfaceNormal);
pathSegments[idx].color *= ((f * sampleLiResult * absDot * visibility));
}//end direct lighting
}//end if not a light
}//end if isect exists
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}//end else
}
}
/*
DIRECT LIGHTING
By taking a final ray directly to a random point on an
emissive object acting as a light source.
Or more advanced [PBRT 15.1.1].
Only want to do this at last bounce (when remaining bounces == 0?)
Just make remainingBounces 0 at the end so it only runs through this once
*/
__global__ void shadeDirectMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
, Geom * lights
, int numLights
, Geom * geoms
, int numGeoms
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces <= 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f)
{
Material isectMaterial = materials[intersection.materialId];
//This will return light's color if isect is of a light
glm::vec3 leResult = Le(-pathSegments[idx].ray.direction, intersection.surfaceNormal, isectMaterial);
//If isect belongs to a light
if (isectMaterial.emittance > 0.0f)
{
pathSegments[idx].color *= leResult; //QUESTION: return this or multiply it?
//pathSegments[idx].color *= (isectMaterial.color * isectMaterial.emittance);
pathSegments[idx].remainingBounces = 0;
return;
}
//Select random light source from lights array ------------------------
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); //QUESTION: should this be 0 or depth?
thrust::uniform_real_distribution<float> u01(0, 1);
int randLightIdx = glm::min(
(int)glm::floor(u01(rng) * numLights),
(numLights - 1));
Geom currLight = lights[randLightIdx];
//Call light's Sample_Li
//This gets us ray direction from isect to random point on light
//We want to take this direction and shoot ray from isect's origin towards light
//(might need to offset origin a bit off isect's normal so that we don't hit isect's object)
//This also returns the color of the light L(isect on light, direction from isect to random point on light)
//(check if direction needs to be negated)
glm::vec3 ptOnLight;
glm::vec3 lightNormal;
if (currLight.type == SPHERE)
{
glm::vec2 sample(u01(rng), u01(rng));
ptOnLight = sampleSphere(sample, currLight, lightNormal);
}
else if (currLight.type == CUBE)
{
glm::vec3 sample(u01(rng), u01(rng), u01(rng));
ptOnLight = sampleCube(sample, currLight, lightNormal);
}
//Create shadow feeler ray ------------------------
glm::vec3 _dirToLight = ptOnLight - intersection.intersectionPt;
glm::vec3 rayDirToLight = glm::normalize(_dirToLight);
Ray rayToLight = spawnRay(intersection.intersectionPt, intersection.surfaceNormal, rayDirToLight);
//Get the intersection from spawning this new shadow feeler ray ------------------------
ShadeableIntersection shadowIsect;
rayIntersect(rayToLight, geoms, numGeoms, shadowIsect);
//If length of the ray to light (before normalization!)
//is greater than length of the ray to shadowIsect (aka t value), then isect is in shadow
//OR if you hit something that's not the light that you sampled, then you're in shadow
//OR if the dist b/w shadowIsect and isect is < dist b/w light and isect - 0.001, you're in shadow
glm::vec3 visibility(1.0f);
if (shadowIsect.t > 0.0f)
{
visibility = ((glm::length(_dirToLight) - 0.1 > shadowIsect.t)) ? glm::vec3(0.0f) : glm::vec3(1.0f);
}
//Other LTE components ------------------------
//Assuming that light is two sided here
//Otherwise it would be:
//glm::vec3 colorOnLight = Le(rayDirToLight, material, normal from sample function);
Material lightMat = materials[currLight.materialid];
glm::vec3 sampleLiResult = lightMat.color * lightMat.emittance;
//glm::vec3 sampleLiResult = Le(rayDirToLight, lightNormal, lightMat);
glm::vec3 f = isectMaterial.color; //if materials have more than 1 bxdf, need to implement function
float absDot = AbsDot(rayDirToLight, intersection.surfaceNormal);
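//Accumulate the single-sample direct-lighting product f * Li * |cos(theta)| * visibility
//(note: no light-sample pdf or light-count weighting is applied here)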
pathSegments[idx].color *= (leResult + (f * sampleLiResult * absDot * visibility));
pathSegments[idx].remainingBounces = 0;
}
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}//end if idx < num_paths
}//end direct lighting
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
// =============================================================================
// PATH TERMINATION SCAN KERNEL
// =============================================================================
// Check if remaining bounces == 0
// Check if path intersection t value == -1 (didn't hit anything)
//UNUSED
__global__ void kernMapRemainingBouncesToBoolean(int n, int *bools, PathSegment *pathSegments)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
//If path's remaining bounces is > 0, mark as 1, else 0
PathSegment currPath = pathSegments[index];
if (currPath.remainingBounces > 0) bools[index] = 1;
else bools[index] = 0;
}
}//end kernMapRemainingBounces
//UNUSED
__global__ void kernMapNoIsectPathToBoolean(int n, int *bools, ShadeableIntersection *intersections)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
ShadeableIntersection currIsect = intersections[index];
//if()
}
}
// Predicate for thrust::partition
struct hasRemainingBounces
{
__host__ __device__
bool operator()(const PathSegment &pathSegment)
{
return pathSegment.remainingBounces > 0;
}
};
//Fill dev_PathSegIndices and dev_IsectIndices with their corresponding material ID
// These arrays should essentially be the same since pathSeg's and Isects correspond to each other
__global__ void kernSortByMaterial(int n, int *pathSegIndices, int *isectIndices, ShadeableIntersection *isects)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
int currMatID = isects[index].materialId;
pathSegIndices[index] = currMatID;
isectIndices[index] = currMatID;
}
}
// =============================================================================
// PATH TRACING CPU FUNCTION
// =============================================================================
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter)
{
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
startCpuTimer();
hipLaunchKernelGGL(( generateRayFromCamera), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
//For stream compaction
int num_remainingPaths = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete)
{
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
//dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
dim3 numblocksPathSegmentTracing = (num_remainingPaths + blockSize1d - 1) / blockSize1d;
// Compute intersections -----------------------------------------------------------------
//Caching first bounce
//Don't start at iter = 0, that's ray from camera to screen
if (CACHE_FIRST_BOUNCE && depth == 0 && iter == 1)
{
hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_remainingPaths //num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_IsectCached
);
}//end if caching first bounce
else
{
hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_remainingPaths //num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
}//end else not caching first bounce
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
// Shading ----------------------------------------------------------------------------
// TODO: --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
if (CACHE_FIRST_BOUNCE && depth == 0 && iter == 1)
{
if (SORT_BY_MATERIAL)
{
//Store material ID's in dev_PathSegIndices and dev_IsectIndices respectively
kernSortByMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (num_remainingPaths, dev_PathSegIndices, dev_IsectIndices, dev_IsectCached);
//Sort the PathSegments and Isects arrays in place according to materialID's placed in their corresponding dev_indices arrays
thrust::sort_by_key(thrust::device, dev_PathSegIndices, dev_PathSegIndices + num_remainingPaths, dev_paths);
thrust::sort_by_key(thrust::device, dev_IsectIndices, dev_IsectIndices + num_remainingPaths, dev_IsectCached);
}
if (DIRECT_LIGHTING)
{
shadeDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_IsectCached,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else if (NAIVE_AND_DIRECT)
{
shadeNaiveAndDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_IsectCached,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else
{
shadeNaiveMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths, //num_paths,
dev_IsectCached,
dev_paths,
dev_materials,
depth);
}
}//end if caching first bounce
//Operating on everything else after first bounce
else
{
if (SORT_BY_MATERIAL)
{
//Store material ID's in dev_PathSegIndices and dev_IsectIndices respectively
kernSortByMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(num_remainingPaths, dev_PathSegIndices, dev_IsectIndices, dev_intersections);
//Sort the PathSegments and Isects arrays in place according to materialID's placed in their corresponding dev_indices arrays
thrust::sort_by_key(thrust::device, dev_PathSegIndices, dev_PathSegIndices + num_remainingPaths, dev_paths);
thrust::sort_by_key(thrust::device, dev_IsectIndices, dev_IsectIndices + num_remainingPaths, dev_intersections);
}
if (DIRECT_LIGHTING)
{
shadeDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_intersections,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else if (NAIVE_AND_DIRECT)
{
shadeNaiveAndDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_intersections,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else
{
shadeNaiveMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths, //num_paths,
dev_intersections,
dev_paths,
dev_materials,
depth);
}
}//end else not caching first bounce
// Stream Compaction Terminated Paths -----------------------------------------------------------------
if (STREAM_COMPACTION)
{
PathSegment* lastRemainingPath = thrust::partition(thrust::device, dev_paths, dev_paths + num_remainingPaths, hasRemainingBounces());
num_remainingPaths = lastRemainingPath - dev_paths;
// TODO: should be based off stream compaction results.
// To test anti-aliasing, change depth >= 1, and move the camera around. You'll see jagged edges become smoother
iterationComplete = ((depth >= traceDepth || num_remainingPaths <= 0) ? true : false);
}
else
{
iterationComplete = (depth >= traceDepth) ? true : false;
}
}//end while
endCpuTimer();
printCPUTimer(iter);
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}//end pathTrace
| 9efcfb2f598c1d28afd129e8e77547d07fd39869.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
//Include Stream Compaction files
#include "stream_compaction\efficient.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// =============================================================================
// TIMER FUNCTIONS
// =============================================================================
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t time_start_cpu;
time_point_t time_end_cpu;
bool cpu_timer_started = false;
float prev_elapsed_time_cpu_milliseconds = 0.f;
void startCpuTimer()
{
if (cpu_timer_started) { throw std::runtime_error("CPU timer already started"); }
cpu_timer_started = true;
time_start_cpu = std::chrono::high_resolution_clock::now();
}
void endCpuTimer()
{
time_end_cpu = std::chrono::high_resolution_clock::now();
if (!cpu_timer_started) { throw std::runtime_error("CPU timer not started"); }
std::chrono::duration<double, std::milli> duro = time_end_cpu - time_start_cpu;
prev_elapsed_time_cpu_milliseconds =
static_cast<decltype(prev_elapsed_time_cpu_milliseconds)>(duro.count());
cpu_timer_started = false;
}
void printCPUTimer(int iter)
{
cout << "Time (in ms): " << prev_elapsed_time_cpu_milliseconds << endl;
cout << "Iteration: " << iter << endl;
}
// =============================================================================
// PATH TRACE INIT AND FREE CPU FUNCTIONS
// =============================================================================
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
//PathSeg and Isect indices sorted by Material
static int * dev_PathSegIndices = NULL;
static int * dev_IsectIndices = NULL;
//Caching first bounce
static ShadeableIntersection * dev_IsectCached = NULL;
//Lights array for direct lighting
static Geom * dev_lights = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
//PathSeg and Isect indices sorted by Material
cudaMalloc(&dev_PathSegIndices, pixelcount * sizeof(int));
cudaMalloc(&dev_IsectIndices, pixelcount * sizeof(int));
//Caching first bounce
cudaMalloc(&dev_IsectCached, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_IsectCached, 0, pixelcount * sizeof(ShadeableIntersection));
//Lights array for direct lighting
cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Geom));
cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
cudaFree(dev_PathSegIndices);
cudaFree(dev_IsectIndices);
cudaFree(dev_IsectCached);
cudaFree(dev_lights);
checkCUDAError("pathtraceFree");
}
// =============================================================================
// GENERATE RAY FROM CAMERA KERNEL FUNCTION
// =============================================================================
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y)
{
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
// Note: If antialiasing -- can NOT cache first bounce!
thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y);
thrust::uniform_real_distribution<float> u01(-1, 1);
float offset_x = u01(rng);
float offset_y = u01(rng);
if (ANTI_ALIASING)
{
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x + offset_x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y + offset_y - (float)cam.resolution.y * 0.5f)
);
}
else
{
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
//Depth of field
//Generate sample point on disk lens, shoot ray through that
if (cam.lensRadius > 0.0f)
{
thrust::uniform_real_distribution<float> u02(0, 1);
glm::vec2 sample = glm::vec2(u02(rng), u02(rng));
glm::vec3 pLens = cam.lensRadius * squareToDiskConocentric(sample);
glm::vec3 pFocus = cam.focalDistance * segment.ray.direction + segment.ray.origin;
glm::vec3 aperaturePt = segment.ray.origin + (cam.up * pLens[1]) + (cam.right * pLens[0]);
segment.ray.origin = aperaturePt;
segment.ray.direction = glm::normalize(pFocus - aperaturePt);
}//end DOF check
}//end if
}
// =============================================================================
// COMPUTE INTERSECTION KERNEL FUNCTION
// =============================================================================
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}//end for all geoms
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal; //QUESTION: should this be normalized?
//intersections[path_index].surfaceNormal = glm::normalize(normal);
intersections[path_index].intersectionPt = intersect_point;
}
}
}
// =============================================================================
// COMPUTE RAY INTERSECTION
// =============================================================================
__host__ __device__
void rayIntersect(
Ray ray
, Geom * geoms
, int geoms_size
, ShadeableIntersection &isect)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}//end for all geoms
if (hit_geom_index == -1)
{
isect.t = -1.0f;
}
else
{
//The ray hits something
isect.t = t_min;
isect.materialId = geoms[hit_geom_index].materialid;
isect.surfaceNormal = normal;
isect.intersectionPt = intersect_point;
}
}
// =============================================================================
// SHADER KERNEL FUNCTIONS + HELPER FUNCTIONS
// =============================================================================
__host__ __device__
glm::vec3 Le(const glm::vec3 &wo, const glm::vec3 &n, const Material &material)
{
//If isect is a light, calculate the emitted light
if (material.emittance > 0.0f)
{
//NOTE: to test if getting black, just return material.emittance
//If normal of object hit and ray are in same direction, return light's color
if (glm::dot(n, wo) > 0.0f)
{
return material.color * material.emittance;
}
else
{
return glm::vec3(0.0f);
}
}
else
{
return glm::vec3(0.0f);
}
}
/*
UNUSED
Get an intersection on the surface of the light
Check if resultant PDF is 0 or that ref
*/
__host__ __device__
glm::vec3 Sample_Li(const Geom &light, const ShadeableIntersection &isect, thrust::default_random_engine &rng, glm::vec3 *wi)
{
thrust::uniform_real_distribution<float> u01(0, 1);
glm::vec2 sample2D = glm::vec2(u01(rng), u01(rng));
//SAMPLE THE SHAPE
//glm::vec3 resultIsectPt = ;
//glm::vec3 resultIsectNormal = ;
//if(pdf < EPSILON) --> return black
//if (resultIsectPt == isect.intersectionPt)
//{
// return glm::vec3(0.0f);
//}
//*wi = glm::normalize(resultIsectPt - isect.intersectionPt);
//return Le(-*wi, , resultIsectNormal);
return glm::vec3(1.0f);
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces <= 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
// if the intersection exists...
if (intersection.t > 0.0f)
{
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
//Use depth for non-compact version to work properly
//https://groups.google.com/forum/#!topic/cis-565-fall-2017/thgdf2jzDyo
//thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
//thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f)
{
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else
{
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
scatterRay(pathSegments[idx], intersection.intersectionPt, intersection.surfaceNormal, material, rng);
pathSegments[idx].remainingBounces--;
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}//end if
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}//end else
}
}
// NAIVE
__global__ void shadeNaiveMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces <= 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
// if the intersection exists...
if (intersection.t > 0.0f)
{
thrust::default_random_engine rng;
if (STREAM_COMPACTION) rng = makeSeededRandomEngine(iter, idx, 0);
else rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f)
{
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
else
{
scatterRay(pathSegments[idx], intersection.intersectionPt, intersection.surfaceNormal, material, rng);
pathSegments[idx].remainingBounces--;
}
}//end if
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}//end else
}
}
// NAIVE AND DIRECT
__global__ void shadeNaiveAndDirectMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
, Geom * lights
, int numLights
, Geom * geoms
, int numGeoms
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces < 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
// if the intersection exists...
if (intersection.t > 0.0f)
{
thrust::default_random_engine rng;
if (STREAM_COMPACTION) rng = makeSeededRandomEngine(iter, idx, 0);
else rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f)
{
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
else
{
scatterRay(pathSegments[idx], intersection.intersectionPt, intersection.surfaceNormal, material, rng);
pathSegments[idx].remainingBounces--;
//DIRECT LIGHTING
if (pathSegments[idx].remainingBounces == 0)
{
//Select random light source from lights array ------------------------
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); //QUESTION: should this be 0 or depth?
thrust::uniform_real_distribution<float> u01(0, 1);
int randLightIdx = glm::min(
(int)glm::floor(u01(rng) * numLights),
(numLights - 1));
Geom currLight = lights[randLightIdx];
glm::vec3 ptOnLight;
glm::vec3 lightNormal;
if (currLight.type == SPHERE)
{
glm::vec2 sample(u01(rng), u01(rng));
ptOnLight = sampleSphere(sample, currLight, lightNormal);
}
else if (currLight.type == CUBE)
{
glm::vec3 sample(u01(rng), u01(rng), u01(rng));
ptOnLight = sampleCube(sample, currLight, lightNormal);
}
//Create shadow feeler ray ------------------------
glm::vec3 _dirToLight = ptOnLight - intersection.intersectionPt;
glm::vec3 rayDirToLight = glm::normalize(_dirToLight);
Ray rayToLight = spawnRay(intersection.intersectionPt, intersection.surfaceNormal, rayDirToLight);
//Get the intersection from spawning this new shadow feeler ray ------------------------
ShadeableIntersection shadowIsect;
rayIntersect(rayToLight, geoms, numGeoms, shadowIsect);
glm::vec3 visibility(1.0f);
if (shadowIsect.t > 0.0f)
{
visibility = ((glm::length(_dirToLight) - 0.1 > shadowIsect.t)) ? glm::vec3(0.0f) : glm::vec3(1.0f);
}
//Other LTE components ------------------------
Material lightMat = materials[currLight.materialid];
glm::vec3 sampleLiResult = lightMat.color * lightMat.emittance;
//glm::vec3 sampleLiResult = Le(rayDirToLight, lightNormal, lightMat);
glm::vec3 f = material.color; //if materials have more than 1 bxdf, need to implement function
float absDot = AbsDot(rayDirToLight, intersection.surfaceNormal);
pathSegments[idx].color *= ((f * sampleLiResult * absDot * visibility));
}//end direct lighting
}//end if not a light
}//end if isect exists
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}//end else
}
}
/*
DIRECT LIGHTING
By taking a final ray directly to a random point on an
emissive object acting as a light source.
Or more advanced [PBRT 15.1.1].
Only want to do this at last bounce (when remaining bounces == 0?)
Just make remainingBounces 0 at the end so it only runs through this once
*/
__global__ void shadeDirectMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
, Geom * lights
, int numLights
, Geom * geoms
, int numGeoms
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
if (pathSegments[idx].remainingBounces <= 0)
{
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f)
{
Material isectMaterial = materials[intersection.materialId];
//This will return light's color if isect is of a light
glm::vec3 leResult = Le(-pathSegments[idx].ray.direction, intersection.surfaceNormal, isectMaterial);
//If isect belongs to a light
if (isectMaterial.emittance > 0.0f)
{
pathSegments[idx].color *= leResult; //QUESTION: return this or multiply it?
//pathSegments[idx].color *= (isectMaterial.color * isectMaterial.emittance);
pathSegments[idx].remainingBounces = 0;
return;
}
//Select random light source from lights array ------------------------
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); //QUESTION: should this be 0 or depth?
thrust::uniform_real_distribution<float> u01(0, 1);
int randLightIdx = glm::min(
(int)glm::floor(u01(rng) * numLights),
(numLights - 1));
Geom currLight = lights[randLightIdx];
//Call light's Sample_Li
//This gets us ray direction from isect to random point on light
//We want to take this direction and shoot ray from isect's origin towards light
//(might need to offset origin a bit off isect's normal so that we don't hit isect's object)
//This also returns the color of the light L(isect on light, direction from isect to random point on light)
//(check if direction needs to be negated)
glm::vec3 ptOnLight;
glm::vec3 lightNormal;
if (currLight.type == SPHERE)
{
glm::vec2 sample(u01(rng), u01(rng));
ptOnLight = sampleSphere(sample, currLight, lightNormal);
}
else if (currLight.type == CUBE)
{
glm::vec3 sample(u01(rng), u01(rng), u01(rng));
ptOnLight = sampleCube(sample, currLight, lightNormal);
}
//Create shadow feeler ray ------------------------
glm::vec3 _dirToLight = ptOnLight - intersection.intersectionPt;
glm::vec3 rayDirToLight = glm::normalize(_dirToLight);
Ray rayToLight = spawnRay(intersection.intersectionPt, intersection.surfaceNormal, rayDirToLight);
//Get the intersection from spawning this new shadow feeler ray ------------------------
ShadeableIntersection shadowIsect;
rayIntersect(rayToLight, geoms, numGeoms, shadowIsect);
//If length of the ray to light (before normalization!)
//is greater than length of the ray to shadowIsect (aka t value), then isect is in shadow
//OR if you hit something that's not the light that you sampled, then you're in shadow
//OR if the dist b/w shadowIsect and isect is < dist b/w light and isect - 0.001, you're in shadow
glm::vec3 visibility(1.0f);
if (shadowIsect.t > 0.0f)
{
visibility = ((glm::length(_dirToLight) - 0.1 > shadowIsect.t)) ? glm::vec3(0.0f) : glm::vec3(1.0f);
}
//Other LTE components ------------------------
//Assuming that light is two sided here
//Otherwise it would be:
//glm::vec3 colorOnLight = Le(rayDirToLight, material, normal from sample function);
Material lightMat = materials[currLight.materialid];
glm::vec3 sampleLiResult = lightMat.color * lightMat.emittance;
//glm::vec3 sampleLiResult = Le(rayDirToLight, lightNormal, lightMat);
glm::vec3 f = isectMaterial.color; //if materials have more than 1 bxdf, need to implement function
float absDot = AbsDot(rayDirToLight, intersection.surfaceNormal);
pathSegments[idx].color *= (leResult + (f * sampleLiResult * absDot * visibility));
pathSegments[idx].remainingBounces = 0;
}
else
{
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}//end if idx < num_paths
}//end direct lighting
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
// =============================================================================
// PATH TERMINATION SCAN KERNEL
// =============================================================================
// Check if remaining bounces == 0
// Check if path intersection t value == -1 (didn't hit anything)
//UNUSED
__global__ void kernMapRemainingBouncesToBoolean(int n, int *bools, PathSegment *pathSegments)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
//If path's remaining bounces is > 0, mark as 1, else 0
PathSegment currPath = pathSegments[index];
if (currPath.remainingBounces > 0) bools[index] = 1;
else bools[index] = 0;
}
}//end kernMapRemainingBounces
//UNUSED
__global__ void kernMapNoIsectPathToBoolean(int n, int *bools, ShadeableIntersection *intersections)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
ShadeableIntersection currIsect = intersections[index];
//Mark paths whose intersection exists (t != -1) as 1, paths that missed as 0
bools[index] = (currIsect.t > 0.0f) ? 1 : 0;
}
}
// Predicate for thrust::partition
struct hasRemainingBounces
{
__host__ __device__
bool operator()(const PathSegment &pathSegment)
{
return pathSegment.remainingBounces > 0;
}
};
//Fill dev_PathSegIndices and dev_IsectIndices with their corresponding material ID
// These arrays should essentially be the same since pathSeg's and Isects correspond to each other
__global__ void kernSortByMaterial(int n, int *pathSegIndices, int *isectIndices, ShadeableIntersection *isects)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
int currMatID = isects[index].materialId;
pathSegIndices[index] = currMatID;
isectIndices[index] = currMatID;
}
}
// =============================================================================
// PATH TRACING CPU FUNCTION
// =============================================================================
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter)
{
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
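// In short, the loop below does, per iteration:
// generateRayFromCamera -> while(!iterationComplete): { memset intersections,
// computeIntersections (cached first bounce when enabled), shade with one of
// shadeDirectMaterial / shadeNaiveAndDirectMaterial / shadeNaiveMaterial,
// thrust::partition away terminated paths } -> finalGather into dev_image.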
// TODO: perform one iteration of path tracing
startCpuTimer();
generateRayFromCamera<<<blocksPerGrid2d, blockSize2d>>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
//For stream compaction
int num_remainingPaths = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete)
{
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
//dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
dim3 numblocksPathSegmentTracing = (num_remainingPaths + blockSize1d - 1) / blockSize1d;
// Compute intersections -----------------------------------------------------------------
//Caching first bounce
//Don't start at iter = 0, that's ray from camera to screen
if (CACHE_FIRST_BOUNCE && depth == 0 && iter == 1)
{
computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>>(
depth
, num_remainingPaths //num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_IsectCached
);
}//end if caching first bounce
else
{
computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>>(
depth
, num_remainingPaths //num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
}//end else not caching first bounce
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
// Shading ----------------------------------------------------------------------------
// TODO: --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
if (CACHE_FIRST_BOUNCE && depth == 1 && iter == 1) //depth was incremented above, so the cached first bounce corresponds to depth == 1 here
{
if (SORT_BY_MATERIAL)
{
//Store material ID's in dev_PathSegIndices and dev_IsectIndices respectively
kernSortByMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (num_remainingPaths, dev_PathSegIndices, dev_IsectIndices, dev_IsectCached);
//Sort the PathSegments and Isects arrays in place according to materialID's placed in their corresponding dev_indices arrays
thrust::sort_by_key(thrust::device, dev_PathSegIndices, dev_PathSegIndices + num_remainingPaths, dev_paths);
thrust::sort_by_key(thrust::device, dev_IsectIndices, dev_IsectIndices + num_remainingPaths, dev_IsectCached);
}
if (DIRECT_LIGHTING)
{
shadeDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_IsectCached,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else if (NAIVE_AND_DIRECT)
{
shadeNaiveAndDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_IsectCached,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else
{
shadeNaiveMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths, //num_paths,
dev_IsectCached,
dev_paths,
dev_materials,
depth);
}
}//end if caching first bounce
//Operating on everything else after first bounce
else
{
if (SORT_BY_MATERIAL)
{
//Store material ID's in dev_PathSegIndices and dev_IsectIndices respectively
kernSortByMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(num_remainingPaths, dev_PathSegIndices, dev_IsectIndices, dev_intersections);
//Sort the PathSegments and Isects arrays in place according to materialID's placed in their corresponding dev_indices arrays
thrust::sort_by_key(thrust::device, dev_PathSegIndices, dev_PathSegIndices + num_remainingPaths, dev_paths);
thrust::sort_by_key(thrust::device, dev_IsectIndices, dev_IsectIndices + num_remainingPaths, dev_intersections);
}
if (DIRECT_LIGHTING)
{
shadeDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_intersections,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else if (NAIVE_AND_DIRECT)
{
shadeNaiveAndDirectMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths,
dev_intersections,
dev_paths,
dev_materials,
depth,
dev_lights,
hst_scene->lights.size(),
dev_geoms,
hst_scene->geoms.size());
}
else
{
shadeNaiveMaterial << <numblocksPathSegmentTracing, blockSize1d >> >(
iter,
num_remainingPaths, //num_paths,
dev_intersections,
dev_paths,
dev_materials,
depth);
}
}//end else not caching first bounce
// Stream Compaction Terminated Paths -----------------------------------------------------------------
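// thrust::partition reorders dev_paths so that segments satisfying
// hasRemainingBounces() come first; the returned pointer marks that boundary,
// so (lastRemainingPath - dev_paths) is the number of still-active paths the
// next bounce has to process. Terminated paths stay in the buffer (after the
// boundary) and are still accumulated by finalGather at the end.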
if (STREAM_COMPACTION)
{
PathSegment* lastRemainingPath = thrust::partition(thrust::device, dev_paths, dev_paths + num_remainingPaths, hasRemainingBounces());
num_remainingPaths = lastRemainingPath - dev_paths;
// TODO: should be based off stream compaction results.
// To test anti-aliasing, change depth >= 1, and move the camera around. You'll see jagged edges become smoother
iterationComplete = ((depth >= traceDepth || num_remainingPaths <= 0) ? true : false);
}
else
{
iterationComplete = (depth >= traceDepth) ? true : false;
}
}//end while
endCpuTimer();
printCPUTimer(iter);
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}//end pathTrace
|
ac38de68d584788499f5eb2398ecacb445c95b5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include<math.h>
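/*
Full discrete convolution computed below: C[i] = sum_j A[j] * B[i-j] for
i = 0 .. n+m-2. A[j] is valid for 0 <= j <= n-1 and B[i-j] for 0 <= i-j <= m-1,
so the sum runs over j in [max(0, i-(m-1)), min(i, n-1)].
*/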
__global__
void vecConvKernel(float* A, float* B, float* C,int*D, int n,int m){
//identify the index of the data to be read
int i= threadIdx.x + blockDim.x * blockIdx.x;
int j;
// if(threadIdx.x==0)
// printf("threadIdx.x is 0, blockid: %d\n",blockIdx.x);
float val=0.0;
int N=n+m-2;
int start=0;
int end=n-1;
//calculate the sum and store (skip threads past the last output index)
if(i<=N){
if(i-(m-1)>start)
start=i-(m-1);
if(i<end)
end=i;
//printf("start:%d end:%d\n",start,end);
for(j=start;j<=end;j++){
val+=A[j]*B[i-j];
}
C[i]=val;
D[i]=end;
}
// else{
// printf("i:%d n:%d blockdim:%d\n",i,N,blockDim.x);
// }
}
__host__
void vecConv(float* A,float* B,float* C,int* D, int n, int m){
int c=ceil(n/256.0);
int size1 = n * sizeof(float);
int size2 = m * sizeof(float);
int size3 = (n+m-1) * sizeof(float);
float *d_A, *d_B, *d_C; //A of size n and B of size m
int *d_D;
//Allocate device memory for A,B,C
hipMalloc((void**)&d_A, size1);
hipMalloc((void**)&d_B, size2);
hipMalloc((void**)&d_C, size3);
hipMalloc((void**)&d_D, size3);
//copy A,B to device memory
hipMemcpy(d_A, A, size1, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, size2, hipMemcpyHostToDevice);
//call kernal function that the calculates sum and stores it in C
double blocks = ceil((n+m-1)/256.0);
printf("blocks:%lf\n",blocks);
clock_t start,end;
start=clock();
//printf("n+m:%d,%f blocks:%d,%f\n",(n+m-1),(n+m-1),ceil((float)(n+m-1)/256.0),ceil((float)(n+m-1)/256.0));
hipLaunchKernelGGL(( vecConvKernel), dim3(blocks),dim3(256) , 0, 0, d_A,d_B,d_C,d_D,n,m);
hipDeviceSynchronize();
end=clock();
printf("time:%lf\n",(double)(end-start)/CLOCKS_PER_SEC);
//the y and z dimensions are set to 1 by default
//copy C from devce memory
hipMemcpy( C,d_C, size3, hipMemcpyDeviceToHost);
hipMemcpy( D,d_D, size3, hipMemcpyDeviceToHost);
//free device memories
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_D);
}
//Host driver: sets up the data and launches the convolution
int main(){
float *A,*B,*C;
int*D;
int n=1024*1024;
A=(float*)malloc(n*sizeof(float));
B=(float*)malloc(n*sizeof(float));
C=(float*)malloc((n+n-1)*sizeof(float));
D=(int*)malloc((n+n-1)*sizeof(int));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
B[i]=(float)2*i;
}
vecConv(A,B,C,D,n,n);
// for(i=1024*1023;i<1024*1024;i++){
// printf("%d\n",D[i]);
// }
free(A);
free(B);
free(C);
return 0;
}
| ac38de68d584788499f5eb2398ecacb445c95b5c.cu | #include<cuda.h>
#include<stdio.h>
#include<math.h>
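/*
Full discrete convolution computed below: C[i] = sum_j A[j] * B[i-j] for
i = 0 .. n+m-2. A[j] is valid for 0 <= j <= n-1 and B[i-j] for 0 <= i-j <= m-1,
so the sum runs over j in [max(0, i-(m-1)), min(i, n-1)].
*/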
__global__
void vecConvKernel(float* A, float* B, float* C,int*D, int n,int m){
//identify the index of the data to be read
int i= threadIdx.x + blockDim.x * blockIdx.x;
int j;
// if(threadIdx.x==0)
// printf("threadIdx.x is 0, blockid: %d\n",blockIdx.x);
float val=0.0;
int N=n+m-2;
int start=0;
int end=n-1;
//calculate the sum and store (skip threads past the last output index)
if(i<=N){
if(i-(m-1)>start)
start=i-(m-1);
if(i<end)
end=i;
//printf("start:%d end:%d\n",start,end);
for(j=start;j<=end;j++){
val+=A[j]*B[i-j];
}
C[i]=val;
D[i]=end;
}
// else{
// printf("i:%d n:%d blockdim:%d\n",i,N,blockDim.x);
// }
}
__host__
void vecConv(float* A,float* B,float* C,int* D, int n, int m){
int c=ceil(n/256.0);
int size1 = n * sizeof(float);
int size2 = m * sizeof(float);
int size3 = (n+m-1) * sizeof(float);
float *d_A, *d_B, *d_C; //A of size n and B of size m
int *d_D;
//Allocate device memory for A,B,C
cudaMalloc((void**)&d_A, size1);
cudaMalloc((void**)&d_B, size2);
cudaMalloc((void**)&d_C, size3);
cudaMalloc((void**)&d_D, size3);
//copy A,B to device memory
cudaMemcpy(d_A, A, size1, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size2, cudaMemcpyHostToDevice);
//call kernal function that the calculates sum and stores it in C
double blocks = ceil((n+m-1)/256.0);
printf("blocks:%lf\n",blocks);
clock_t start,end;
start=clock();
//printf("n+m:%d,%f blocks:%d,%f\n",(n+m-1),(n+m-1),ceil((float)(n+m-1)/256.0),ceil((float)(n+m-1)/256.0));
vecConvKernel<<< blocks,256 >>>(d_A,d_B,d_C,d_D,n,m);
cudaDeviceSynchronize();
end=clock();
printf("time:%lf\n",(double)(end-start)/CLOCKS_PER_SEC);
//the y and z dimensions are set to 1 by default
//copy C from devce memory
cudaMemcpy( C,d_C, size3, cudaMemcpyDeviceToHost);
cudaMemcpy( D,d_D, size3, cudaMemcpyDeviceToHost);
//free device memories
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_D);
}
//Host driver: sets up the data and launches the convolution
int main(){
float *A,*B,*C;
int*D;
int n=1024*1024;
A=(float*)malloc(n*sizeof(float));
B=(float*)malloc(n*sizeof(float));
C=(float*)malloc((n+n-1)*sizeof(float));
D=(int*)malloc((n+n-1)*sizeof(int));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
B[i]=(float)2*i;
}
vecConv(A,B,C,D,n,n);
// for(i=1024*1023;i<1024*1024;i++){
// printf("%d\n",D[i]);
// }
free(A);
free(B);
free(C);
return 0;
}
|
87c36ad52583645d33320ff858335672162bfb03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2016, David lu
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "../../config.h"
#ifdef __PARALLELTYPE__
#if __PARALLELTYPE__ == __CUDA__
#include "../../tensor/cuda/cuda_log.h"
namespace cacu {
__global__ void _k_CACU_SAXPY_ATOMIC_CUDA(float *x, float a, float *y,
const int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
atomicAdd(y + i, a * x[i]);
}
}
extern "C" void cacu_saxpy_atomic_cuda(float *x, float a, float *y,
const int length) {
hipLaunchKernelGGL(( _k_CACU_SAXPY_ATOMIC_CUDA), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, x, a, y, length);
CUDA_CHECK(hipDeviceSynchronize());
}
__global__ void _k_CACU_ISAXB_CUDA(float *x, const int length, const float a,
int *index_, const float b, float *y) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
if (index_[0] >= 0) {
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
y[i] = x[i];
}
__syncthreads();
if (threadid == 0)
y[index_[0]] = a * x[index_[0]] + b;
} else {
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
y[i] = 0;
}
}
}
/**
* @cacu_isaxb_cuda
* y[index] = x[index]*a + b
*/
extern "C" void cacu_isaxb_cuda(float *x, const int length, const float a,
int *index_, const float b, float *y) {
hipLaunchKernelGGL(( _k_CACU_ISAXB_CUDA), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, x, length, a, index_, b, y);
CUDA_CHECK(hipDeviceSynchronize());
}
__global__ void _k_ARGMAX_CUDA(float *x, const int length,
unsigned int *index_) {
__shared__ float shared_data[THREADNUM];
__shared__ unsigned int index_data[THREADNUM];
int tid = threadIdx.x;
int max_length = THREADNUM;
if (THREADNUM > length)
max_length = length;
if (tid < max_length) {
shared_data[tid] = x[tid];
index_data[tid] = tid;
}
for (unsigned int i = tid; i < length; i += THREADNUM) {
if (x[i] > shared_data[tid]) {
shared_data[tid] = x[i];
index_data[tid] = i;
}
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < max_length; ++i) {
if (shared_data[0] < shared_data[i]) {
shared_data[0] = shared_data[i];
index_data[0] = index_data[i];
}
}
index_[0] = index_data[0];
}
}
extern "C" void cacu_argmax_cuda(float *x, const int length,
unsigned int *index_) {
hipLaunchKernelGGL(( _k_ARGMAX_CUDA), dim3(1), dim3(THREADNUM), 0, 0, x, length, index_);
CUDA_CHECK(hipDeviceSynchronize());
}
extern "C" void cacu_transpose_cuda(float *mtx, const int m, const int n) {
}
__global__ void _k_CACU_CLIP_VEC_CUDA(float *data, const float threshold,
const int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
data[i] = data[i] * (abs(data[i]) >= threshold);
}
}
extern "C" void cacu_clip_vec_cuda(float *data, const float threshold,
const int length) {
hipLaunchKernelGGL(( _k_CACU_CLIP_VEC_CUDA), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, threshold, length);
CUDA_CHECK(hipDeviceSynchronize());
}
}
#endif
#endif
| 87c36ad52583645d33320ff858335672162bfb03.cu | /*
Copyright (c) 2016, David lu
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "../../config.h"
#ifdef __PARALLELTYPE__
#if __PARALLELTYPE__ == __CUDA__
#include "../../tensor/cuda/cuda_log.h"
namespace cacu {
__global__ void _k_CACU_SAXPY_ATOMIC_CUDA(float *x, float a, float *y,
const int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
atomicAdd(y + i, a * x[i]);
}
}
extern "C" void cacu_saxpy_atomic_cuda(float *x, float a, float *y,
const int length) {
_k_CACU_SAXPY_ATOMIC_CUDA<<<BLOCKNUM, THREADNUM, 0>>>(x, a, y, length);
CUDA_CHECK(cudaThreadSynchronize());
}
__global__ void _k_CACU_ISAXB_CUDA(float *x, const int length, const float a,
int *index_, const float b, float *y) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
if (index_[0] >= 0) {
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
y[i] = x[i];
}
__syncthreads();
if (threadid == 0)
y[index_[0]] = a * x[index_[0]] + b;
} else {
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
y[i] = 0;
}
}
}
/**
* @cacu_isaxb_cuda
* y[index] = x[index]*a + b
*/
extern "C" void cacu_isaxb_cuda(float *x, const int length, const float a,
int *index_, const float b, float *y) {
_k_CACU_ISAXB_CUDA<<<BLOCKNUM, THREADNUM, 0>>>(x, length, a, index_, b, y);
CUDA_CHECK(cudaThreadSynchronize());
}
__global__ void _k_ARGMAX_CUDA(float *x, const int length,
unsigned int *index_) {
__shared__ float shared_data[THREADNUM];
__shared__ unsigned int index_data[THREADNUM];
int tid = threadIdx.x;
int max_length = THREADNUM;
if (THREADNUM > length)
max_length = length;
if (tid < max_length) {
shared_data[tid] = x[tid];
index_data[tid] = tid;
}
for (unsigned int i = tid; i < length; i += THREADNUM) {
if (x[i] > shared_data[tid]) {
shared_data[tid] = x[i];
index_data[tid] = i;
}
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < max_length; ++i) {
if (shared_data[0] < shared_data[i]) {
shared_data[0] = shared_data[i];
index_data[0] = index_data[i];
}
}
index_[0] = index_data[0];
}
}
extern "C" void cacu_argmax_cuda(float *x, const int length,
unsigned int *index_) {
_k_ARGMAX_CUDA<<<1, THREADNUM, 0>>>(x, length, index_);
CUDA_CHECK(cudaThreadSynchronize());
}
extern "C" void cacu_transpose_cuda(float *mtx, const int m, const int n) {
}
__global__ void _k_CACU_CLIP_VEC_CUDA(float *data, const float threshold,
const int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
for (int i = threadid; i < length; i += BLOCKNUM * THREADNUM) {
data[i] = data[i] * (abs(data[i]) >= threshold);
}
}
extern "C" void cacu_clip_vec_cuda(float *data, const float threshold,
const int length) {
_k_CACU_CLIP_VEC_CUDA<<<BLOCKNUM, THREADNUM, 0>>>(data, threshold, length);
CUDA_CHECK(cudaThreadSynchronize());
}
}
#endif
#endif
|
272e11630bb473744b0dae70919efa9d00cfb7ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
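// Jacobi update for Laplace's equation: each interior cell becomes the average
// of its four neighbours, u_new[x][y] = 0.25*(u[x-1][y]+u[x+1][y]+u[x][y-1]+u[x][y+1]);
// neighbours outside the grid contribute 0, and the x == 0 edge is held at u = 1.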
__global__ void heat_step(float * d_out, float * d_in)
{
// int block_x = blockIdx.x;
// int block_y = blockIdx.y;
int x_glob;
int y_glob;
int x_total_dim = blockDim.x * gridDim.x;
int y_total_dim = blockDim.y * gridDim.y;
int location;
x_glob = blockDim.x * blockIdx.x + threadIdx.x;
y_glob = blockDim.y * blockIdx.y + threadIdx.y;
location = y_glob * x_total_dim + x_glob;
d_out[location] = 0;
if (x_glob > 0)
{
d_out[location] += 0.25 * d_in[location - 1];
}
if (x_glob < (x_total_dim - 1))
{
d_out[location] += 0.25 * d_in[location + 1];
}
if (y_glob > 0)
{
d_out[location] += 0.25 * d_in[location - x_total_dim];
}
if (y_glob < (y_total_dim - 1))
{
d_out[location] += 0.25 * d_in[location + x_total_dim];
}
if (x_glob == 0)
{
d_out[location] = 1;
}
}
int main()
{
const int N=200;
const int M=200;
const int ARRAY_SIZE = N * M;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int Niter = 1000;
size_t counter = 0;
FILE * writefile;
writefile=fopen("out_laplace.txt", "w");
float h_start[ARRAY_SIZE];
for(int i=0; i<ARRAY_SIZE; i++)
{
h_start[i] = 0;
}
float h_out[ARRAY_SIZE];
float * d_in;
float * d_out;
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMemcpy(d_in, h_start, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out, h_start, ARRAY_BYTES, hipMemcpyHostToDevice); //the first launch below reads d_out, so it must be initialized too
while (counter<Niter)
{
hipLaunchKernelGGL(( heat_step), dim3(dim3(10,10)), dim3(dim3(N/10,M/10)), 0, 0, d_in, d_out);
hipLaunchKernelGGL(( heat_step), dim3(dim3(10,10)), dim3(dim3(N/10,M/10)), 0, 0, d_out, d_in);
counter=counter+2;
}
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
for(int i=0; i<N; i++)
{
for(int j=0; j<M; j++)
{
fprintf(writefile,"%e\t", h_out[i * M + j]);
}
fprintf(writefile, "\n");
}
fclose(writefile);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 272e11630bb473744b0dae70919efa9d00cfb7ec.cu | #include <stdio.h>
#include <math.h>
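// Jacobi update for Laplace's equation: each interior cell becomes the average
// of its four neighbours, u_new[x][y] = 0.25*(u[x-1][y]+u[x+1][y]+u[x][y-1]+u[x][y+1]);
// neighbours outside the grid contribute 0, and the x == 0 edge is held at u = 1.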
__global__ void heat_step(float * d_out, float * d_in)
{
// int block_x = blockIdx.x;
// int block_y = blockIdx.y;
int x_glob;
int y_glob;
int x_total_dim = blockDim.x * gridDim.x;
int y_total_dim = blockDim.y * gridDim.y;
int location;
x_glob = blockDim.x * blockIdx.x + threadIdx.x;
y_glob = blockDim.y * blockIdx.y + threadIdx.y;
location = y_glob * x_total_dim + x_glob;
d_out[location] = 0;
if (x_glob > 0)
{
d_out[location] += 0.25 * d_in[location - 1];
}
if (x_glob < (x_total_dim - 1))
{
d_out[location] += 0.25 * d_in[location + 1];
}
if (y_glob > 0)
{
d_out[location] += 0.25 * d_in[location - x_total_dim];
}
if (y_glob < (y_total_dim - 1))
{
d_out[location] += 0.25 * d_in[location + x_total_dim];
}
if (x_glob == 0)
{
d_out[location] = 1;
}
}
int main()
{
const int N=200;
const int M=200;
const int ARRAY_SIZE = N * M;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int Niter = 1000;
size_t counter = 0;
FILE * writefile;
writefile=fopen("out_laplace.txt", "w");
float h_start[ARRAY_SIZE];
for(int i=0; i<ARRAY_SIZE; i++)
{
h_start[i] = 0;
}
float h_out[ARRAY_SIZE];
float * d_in;
float * d_out;
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMemcpy(d_in, h_start, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, h_start, ARRAY_BYTES, cudaMemcpyHostToDevice); //the first launch below reads d_out, so it must be initialized too
while (counter<Niter)
{
heat_step<<<dim3(10,10), dim3(N/10,M/10)>>>(d_in, d_out);
heat_step<<<dim3(10,10), dim3(N/10,M/10)>>>(d_out, d_in);
counter=counter+2;
}
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++)
{
for(int j=0; j<M; j++)
{
fprintf(writefile,"%e\t", h_out[i * M + j]);
}
fprintf(writefile, "\n");
}
fclose(writefile);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
b664d6b12d79184b573a3675131cc8e0cd122f6f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <hip/hip_runtime.h>
#include <fenv.h>
#include <getopt.h>
#include <cassert>
#include <functional>
#include <iostream>
#include <random>
#include <vector>
#include "fbgemm_gpu/bench_utils.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
#include "fbgemm_gpu/batched_unary_embedding_wrappers.cuh"
void generate_auxiliary_tensors(
int batch_size,
std::vector<int>& hash_sizes,
std::vector<long>& table_offsets,
std::vector<long>& lengths,
std::vector<long>& offsets,
std::vector<long>& indices) {
// generate lengths and indices
std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(0.0, 1.0);
fesetround(FE_TONEAREST);
for (int h = 0; h < hash_sizes.size(); h++) {
for (int i = 0; i < batch_size; i++) {
long n_indices = 1;
indices.push_back(
std::lrintf(distribution(generator) * (hash_sizes[h] - 1)));
lengths.push_back(n_indices);
}
}
// generate offsets
offsets.push_back(0);
long inc_sum = 0;
for (auto const& item : lengths) {
offsets.push_back(inc_sum += item);
}
// generate table_offsets
long inc_table_hash_sum = 0;
table_offsets.push_back(0);
for (auto const& item : hash_sizes) {
table_offsets.push_back(inc_table_hash_sum += item);
}
}
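// Illustration of the generated layout: with num_tables = 2 and batch_size = 2,
// lengths = {1,1,1,1} (one index per table/sample pair), offsets = {0,1,2,3,4}
// (running prefix sum of lengths), and table_offsets = {0, h0, h0+h1}, where
// h0 and h1 stand for the randomly drawn hash sizes.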
void parse_commandline(
int argc,
char* argv[],
int* batch_size,
int* num_tables,
int* num_tasks,
int* iters) {
static struct option longopts[] = {
{"batch-size", required_argument, NULL, 'b'},
{"num_tables", required_argument, NULL, 't'},
{"num_tasks", required_argument, NULL, 'p'},
{"iters", required_argument, NULL, 'i'}};
int opt;
while ((opt = getopt_long(argc, argv, "b:t:p:i", longopts, NULL)) != -1) {
switch (opt) {
case 'b':
*batch_size = atoi(optarg);
break;
case 't':
*num_tables = atoi(optarg);
break;
case 'p':
*num_tasks = atoi(optarg);
break;
case 'i':
*iters = atoi(optarg);
break;
}
}
std::cout << "batch size: " << *batch_size << std::endl;
std::cout << "number of tables: " << *num_tables << std::endl;
std::cout << "number of tasks: " << *num_tasks << std::endl;
std::cout << "iteration: " << *iters << std::endl;
}
int main(int argc, char* argv[]) {
int batch_size = 512;
int num_tables = 2;
int num_tasks = 3;
int iters = 100;
parse_commandline(argc, argv, &batch_size, &num_tables, &num_tasks, &iters);
// generate hash_sizes
std::vector<int> hash_sizes;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(50, 250);
for (int i = 0; i < num_tables; i++) {
hash_sizes.push_back(distribution(generator));
}
std::cout << "table rows: ";
for (auto const& hash_size : hash_sizes) {
std::cout << hash_size << ",";
}
std::cout << std::endl;
// the auxilary tensors
std::vector<long> table_offsets;
std::vector<long> lengths;
std::vector<long> offsets;
std::vector<long> indices;
generate_auxiliary_tensors(
batch_size, hash_sizes, table_offsets, lengths, offsets, indices);
// cache flush utility
// gpu ptrs
float* embedding_table_ptr;
long* table_offsets_ptr;
long* offsets_ptr;
long* indices_ptr;
float* output_ptr;
float* grad_ptr;
float* grad_weight_ptr;
int embedding_rows = 0;
for (auto const& h : hash_sizes) {
embedding_rows += h;
}
CUDA_CHECK(hipMalloc(
&embedding_table_ptr, embedding_rows * num_tasks * sizeof(float)));
// generate embedding table random numbers
generate_random_table(embedding_table_ptr, embedding_rows * num_tasks);
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(
hipMalloc(&table_offsets_ptr, table_offsets.size() * sizeof(long)));
CUDA_CHECK(hipMalloc(&offsets_ptr, offsets.size() * sizeof(long)));
CUDA_CHECK(hipMalloc(&indices_ptr, indices.size() * sizeof(long)));
CUDA_CHECK(hipMalloc(
&output_ptr, batch_size * num_tables * num_tasks * sizeof(float)));
CUDA_CHECK(hipGetLastError());
// memcpy
CUDA_CHECK(hipMemcpy(
table_offsets_ptr,
table_offsets.data(),
table_offsets.size() * sizeof(long),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(
offsets_ptr,
offsets.data(),
offsets.size() * sizeof(long),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(
indices_ptr,
indices.data(),
indices.size() * sizeof(long),
hipMemcpyHostToDevice));
// forward
float forward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_forward(
num_tasks,
batch_size,
num_tables,
embedding_table_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
output_ptr);
});
// free forward-only gpu ptrs
hipFree(output_ptr);
// backward
hipMalloc(&grad_ptr, batch_size * num_tables * num_tasks * sizeof(float));
generate_random_table(grad_ptr, batch_size * num_tables * num_tasks);
CUDA_CHECK(hipDeviceSynchronize());
hipMalloc(&grad_weight_ptr, embedding_rows * num_tasks * sizeof(float));
float backward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_backward(
num_tasks,
batch_size,
num_tables,
grad_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
grad_weight_ptr);
});
// free backward-only gpu ptrs
hipFree(grad_ptr);
hipFree(grad_weight_ptr);
// free other gpu ptrs;
hipFree(embedding_table_ptr);
hipFree(table_offsets_ptr);
hipFree(offsets_ptr);
hipFree(indices_ptr);
std::cout << "Average Forward Pass Execution time per iteration: "
<< forward_time << " ms" << std::endl;
std::cout << "Forward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(forward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
std::cout << "Average Backward Pass Execution time per iteration: "
<< backward_time << " ms" << std::endl;
std::cout << "Backward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(backward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
}
| b664d6b12d79184b573a3675131cc8e0cd122f6f.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <cuda.h>
#include <fenv.h>
#include <getopt.h>
#include <cassert>
#include <functional>
#include <iostream>
#include <random>
#include <vector>
#include "fbgemm_gpu/bench_utils.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
#include "fbgemm_gpu/batched_unary_embedding_wrappers.cuh"
void generate_auxiliary_tensors(
int batch_size,
std::vector<int>& hash_sizes,
std::vector<long>& table_offsets,
std::vector<long>& lengths,
std::vector<long>& offsets,
std::vector<long>& indices) {
// generate lengths and indices
std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(0.0, 1.0);
fesetround(FE_TONEAREST);
for (int h = 0; h < hash_sizes.size(); h++) {
for (int i = 0; i < batch_size; i++) {
long n_indices = 1;
indices.push_back(
std::lrintf(distribution(generator) * (hash_sizes[h] - 1)));
lengths.push_back(n_indices);
}
}
// generate offsets
offsets.push_back(0);
long inc_sum = 0;
for (auto const& item : lengths) {
offsets.push_back(inc_sum += item);
}
// generate table_offsets
long inc_table_hash_sum = 0;
table_offsets.push_back(0);
for (auto const& item : hash_sizes) {
table_offsets.push_back(inc_table_hash_sum += item);
}
}
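// Illustration of the generated layout: with num_tables = 2 and batch_size = 2,
// lengths = {1,1,1,1} (one index per table/sample pair), offsets = {0,1,2,3,4}
// (running prefix sum of lengths), and table_offsets = {0, h0, h0+h1}, where
// h0 and h1 stand for the randomly drawn hash sizes.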
void parse_commandline(
int argc,
char* argv[],
int* batch_size,
int* num_tables,
int* num_tasks,
int* iters) {
static struct option longopts[] = {
{"batch-size", required_argument, NULL, 'b'},
{"num_tables", required_argument, NULL, 't'},
{"num_tasks", required_argument, NULL, 'p'},
{"iters", required_argument, NULL, 'i'}};
int opt;
while ((opt = getopt_long(argc, argv, "b:t:p:i", longopts, NULL)) != -1) {
switch (opt) {
case 'b':
*batch_size = atoi(optarg);
break;
case 't':
*num_tables = atoi(optarg);
break;
case 'p':
*num_tasks = atoi(optarg);
break;
case 'i':
*iters = atoi(optarg);
break;
}
}
std::cout << "batch size: " << *batch_size << std::endl;
std::cout << "number of tables: " << *num_tables << std::endl;
std::cout << "number of tasks: " << *num_tasks << std::endl;
std::cout << "iteration: " << *iters << std::endl;
}
int main(int argc, char* argv[]) {
int batch_size = 512;
int num_tables = 2;
int num_tasks = 3;
int iters = 100;
parse_commandline(argc, argv, &batch_size, &num_tables, &num_tasks, &iters);
// generate hash_sizes
std::vector<int> hash_sizes;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(50, 250);
for (int i = 0; i < num_tables; i++) {
hash_sizes.push_back(distribution(generator));
}
std::cout << "table rows: ";
for (auto const& hash_size : hash_sizes) {
std::cout << hash_size << ",";
}
std::cout << std::endl;
// the auxilary tensors
std::vector<long> table_offsets;
std::vector<long> lengths;
std::vector<long> offsets;
std::vector<long> indices;
generate_auxiliary_tensors(
batch_size, hash_sizes, table_offsets, lengths, offsets, indices);
// cache flush utility
// gpu ptrs
float* embedding_table_ptr;
long* table_offsets_ptr;
long* offsets_ptr;
long* indices_ptr;
float* output_ptr;
float* grad_ptr;
float* grad_weight_ptr;
int embedding_rows = 0;
for (auto const& h : hash_sizes) {
embedding_rows += h;
}
CUDA_CHECK(cudaMalloc(
&embedding_table_ptr, embedding_rows * num_tasks * sizeof(float)));
// generate embedding table random numbers
generate_random_table(embedding_table_ptr, embedding_rows * num_tasks);
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(
cudaMalloc(&table_offsets_ptr, table_offsets.size() * sizeof(long)));
CUDA_CHECK(cudaMalloc(&offsets_ptr, offsets.size() * sizeof(long)));
CUDA_CHECK(cudaMalloc(&indices_ptr, indices.size() * sizeof(long)));
CUDA_CHECK(cudaMalloc(
&output_ptr, batch_size * num_tables * num_tasks * sizeof(float)));
CUDA_CHECK(cudaGetLastError());
// memcpy
CUDA_CHECK(cudaMemcpy(
table_offsets_ptr,
table_offsets.data(),
table_offsets.size() * sizeof(long),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(
offsets_ptr,
offsets.data(),
offsets.size() * sizeof(long),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(
indices_ptr,
indices.data(),
indices.size() * sizeof(long),
cudaMemcpyHostToDevice));
// forward
float forward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_forward(
num_tasks,
batch_size,
num_tables,
embedding_table_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
output_ptr);
});
// free forward-only gpu ptrs
cudaFree(output_ptr);
// backward
cudaMalloc(&grad_ptr, batch_size * num_tables * num_tasks * sizeof(float));
generate_random_table(grad_ptr, batch_size * num_tables * num_tasks);
CUDA_CHECK(cudaDeviceSynchronize());
cudaMalloc(&grad_weight_ptr, embedding_rows * num_tasks * sizeof(float));
float backward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_backward(
num_tasks,
batch_size,
num_tables,
grad_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
grad_weight_ptr);
});
// free backward-only gpu ptrs
cudaFree(grad_ptr);
cudaFree(grad_weight_ptr);
// free other gpu ptrs;
cudaFree(embedding_table_ptr);
cudaFree(table_offsets_ptr);
cudaFree(offsets_ptr);
cudaFree(indices_ptr);
std::cout << "Average Forward Pass Execution time per iteration: "
<< forward_time << " ms" << std::endl;
std::cout << "Forward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(forward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
std::cout << "Average Backward Pass Execution time per iteration: "
<< backward_time << " ms" << std::endl;
std::cout << "Backward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(backward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
}
|
577a8817116766731516ab105a43cab408ebca75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergecg.cu, normal z -> s, Thu Oct 8 23:05:50 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
#if TORCH_HIP_VERSION >= 11000
// todo: destroy descriptor and see if the original code descriptors have to be changed
#define hipsparseScsrmv(handle, op, rows, cols, nnz, alpha, descr, dval, drow, dcol, x, beta, y) \
{ \
hipsparseSpMatDescr_t descrA; \
hipsparseDnVecDescr_t descrX, descrY; \
hipsparseCreateCsr(&descrA, rows, cols, nnz, \
(void *)drow, (void *)dcol, (void *)dval, \
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, \
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F); \
hipsparseCreateDnVec(&descrX, cols, x, HIP_R_32F); \
hipsparseCreateDnVec(&descrY, rows, y, HIP_R_32F); \
\
size_t bufsize; \
void *buf; \
hipsparseSpMV_bufferSize(handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, HIP_R_32F, HIPSPARSE_CSRMV_ALG1, &bufsize); \
if (bufsize > 0) \
magma_malloc(&buf, bufsize); \
hipsparseSpMV( handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, HIP_R_32F, HIPSPARSE_CSRMV_ALG1, buf); \
if (bufsize > 0) \
magma_free(buf); \
}
#endif
// These routines merge multiple kernels from smergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
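// All fused kernels below share a two-stage reduction: stage one computes
// z = A*d and one partial sum of the dot product d^T*z per thread block
// (shared-memory tree reduction) into vtmp; the magma_scgreduce_kernel_spmv1/2
// kernels then fold those per-block partials across the grid.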
// accelerated reduction for one vector
__global__ void
magma_scgreduce_kernel_spmv1(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated reduction for two vectors
__global__ void
magma_scgreduce_kernel_spmv2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_scgmerge_spmvcsr_kernel(
int n,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
float dot = MAGMA_S_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
float val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_scgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
float val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernelb1(
int n,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
int idx = threadIdx.x; // local row
int bdx = blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < n ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
float val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * d [ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
/*
if(i < n ) {
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++){
int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
float val = dval[offset+ blocksize * k + threadIdx.x];
if( val != 0){
dot += val*d[col];
}
}
//float dot = MAGMA_S_MAKE(0.0, 0.0);
//for ( int k = 0; k < num_cols_per_row; k++ ) {
// int col = dcolind [ n * k + i ];
// float val = dval [ n * k + i ];
// if( val != 0)
// dot += val * d[ col ];
//}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}*/
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_8(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT 16 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_16(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT 32 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_32(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_scgmerge_spmvellpackrt_kernel2(
int n,
float * z,
float * d,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_S_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_scgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
float val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
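/* Illustrative note on the SELLP addressing used by the three kernels above
   (inferred from the index arithmetic; a sketch, not normative documentation
   of the format): within one slice of `blocksize` rows the nonzeros are laid
   out in chunks of blocksize*T values, so the thread handling local row idy
   with in-row position idx reads

       dval[ drowptr[bdx] + idx*blocksize + idy + (blocksize*T)*k ],  k = 0 .. max_-1

   i.e. consecutive threads of a chunk touch consecutive memory locations
   (coalesced loads); the T partial sums per row are then combined in shared
   memory. */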
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_scg_rhokernel(
float * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
/**
Purpose
-------
    Merges the first SpMV of the CG iteration (supporting the different
    storage formats) with the dot product (d, A*d) and the computation of rho
Arguments
---------
@param[in]
A magma_s_matrix
input matrix
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dd magmaFloat_ptr
input vector d
@param[out]
dz magmaFloat_ptr
                output vector z = A * d
@param[out]
skp magmaFloat_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_spmv1(
magma_s_matrix A,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
hipLaunchKernelGGL(( magma_scgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
hipLaunchKernelGGL(( magma_scgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
hipLaunchKernelGGL(( magma_scgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_CUCSR ) {
hipsparseHandle_t cusparseHandle = 0;
hipsparseMatDescr_t descr = 0;
float c_one = MAGMA_S_ONE;
float c_zero = MAGMA_S_ZERO;
hipsparseCreate( &cusparseHandle );
hipsparseSetStream( cusparseHandle, queue->cuda_stream() );
hipsparseCreateMatDescr( &descr );
hipsparseSetMatType( descr, HIPSPARSE_MATRIX_TYPE_GENERAL );
hipsparseSetMatIndexBase( descr, HIPSPARSE_INDEX_BASE_ZERO );
hipsparseScsrmv( cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &c_one, descr,
A.dval, A.drow, A.dcol, dd, &c_zero, dz );
hipsparseDestroyMatDescr( descr );
hipsparseDestroy( cusparseHandle );
cusparseHandle = 0;
descr = 0;
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvell_kernelb1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.blocksize,
A.dval, A.dcol, A.drow, dd, dz, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( float( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( float );
if ( A.alignment == 8)
hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_8)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_16)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_32)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
int num_threads = A.alignment*A.blocksize;
int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( float( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( float );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_32)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_16)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_8)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+4, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_rhokernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
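/* Usage sketch (illustrative only, not an excerpt from the solver): one call
   to the wrapper above computes z = A*d for the selected storage format,
   reduces the dot product (d,z) into skp[4] and sets skp[3] = rho = skp[1]/skp[4]
   as well as skp[2] = gamma; d1/d2 are assumed to be device scratch arrays
   large enough for the block-wise partial sums.

       magma_scgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );
       // afterwards: dz = A*dd, skp[4] = (dd,dz), skp[3] = rho, skp[2] = gamma
*/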
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_scgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_scg_alphabetakernel(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_S_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_scg_d_kernel(
int n,
float * skp,
float * r,
float * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
    Merges the update of r and x with the dot product and then performs
    the update of the Krylov search direction d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
    @param[in,out]
    dx          magmaFloat_ptr
                input/output vector x
    @param[in,out]
    dr          magmaFloat_ptr
                input/output vector r
    @param[in,out]
    dd          magmaFloat_ptr
                input/output vector d (search direction)
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_scgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dr, dd );
return MAGMA_SUCCESS;
}
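/* Illustrative sketch of one merged CG iteration built from the two wrappers
   above. The scalar layout of skp is inferred from the kernels in this file
   and should be treated as an assumption of this sketch: skp[0] = alpha,
   skp[1] = beta = (r,r), skp[2] = gamma, skp[3] = rho, skp[4] = (d,z).

       magma_scgmerge_spmv1 ( A, d1, d2, dd, dz, skp, queue );
       magma_scgmerge_xrbeta( A.num_rows, d1, d2, dx, dr, dd, dz, skp, queue );
       // x += rho*d, r -= rho*z, skp[1] = (r,r), d = r + skp[0]*d
*/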
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_spcgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
}
}
// dot products for one vector pair: (v0,w0) and (v0,v0), first reduction step
__global__ void
magma_smsdot_one_kernel_1(
int n,
float * v0,
float * w0,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
    // one vector pair v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v0[ i ] * v0[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
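// Note on the scratch layout (inferred from the kernel above together with
// magma_scgreduce_kernel_spmv2): the two partial dot products are stacked in
// vtmp at offsets 0 and n, so the d1/d2 workspaces handed to the wrappers
// below are assumed to provide room for both halves.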
/**
Purpose
-------
    Computes the merged update of x and r, x = x + rho * d and r = r - rho * z
    (first part of the update in the preconditioned CG iteration)
Arguments
---------
@param[in]
n int
dimension n
@param[in,out]
dx magmaFloat_ptr
                input/output vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta1(
magma_int_t n,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_spcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream(),
n, dx, dr, dd, dz, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
/**
Purpose
-------
    Merges the dot products ( h, r ) and ( r, r ) with the computation of the
    scalars and the update of the Krylov search direction d
    (second part of the update in the preconditioned CG iteration)
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dh magmaFloat_ptr
                input vector h (preconditioned residual)
@param[in]
dr magmaFloat_ptr
input/output vector r
    @param[in,out]
    dd          magmaFloat_ptr
                input/output vector d
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta2(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dh,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_smsdot_one_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, dr, dh, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );
return MAGMA_SUCCESS;
}
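/* Usage sketch (illustrative): in the preconditioned variant the merged
   update is split into two calls so that an arbitrary preconditioner can be
   applied to r in between; the middle step below is an assumption of this
   sketch, not part of the routines in this file.

       magma_spcgmerge_xrbeta1( n, dx, dr, dd, dz, skp, queue );   // x += rho*d, r -= rho*z
       // ... h = M^{-1} r  (preconditioner of choice) ...
       magma_spcgmerge_xrbeta2( n, d1, d2, dh, dr, dd, skp, queue );
       // (r,h) -> skp[1], (r,r) -> skp[6], skp[0] = skp[1]/skp[2], d = h + skp[0]*d
*/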
/* -------------------------------------------------------------------------- */
// updates x, r and h = diag.*r, and computes the first reduction step of (h,r) and (r,r)
__global__ void
magma_sjcgmerge_xrbeta_kernel(
int n,
float * diag,
float * x,
float * r,
float * d,
float * z,
float * h,
float * vtmp,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
h[i] = r[i] * diag[i];
}
__syncthreads();
temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
r[ i ] * r[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x with the Jacobi preconditioning h = diag .* r,
    the dot products ( h, r ) and ( r, r ), and the update of the Krylov
    search direction d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
diag magmaFloat_ptr
inverse diagonal (Jacobi preconditioner)
    @param[in,out]
    dx          magmaFloat_ptr
                iteration vector x
    @param[in,out]
    dr          magmaFloat_ptr
                input/output vector r
    @param[in,out]
    dd          magmaFloat_ptr
                input/output vector d
    @param[in]
    dz          magmaFloat_ptr
                input vector z
    @param[in,out]
    dh          magmaFloat_ptr
                output vector h = diag .* r
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_sjcgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr diag,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr dh,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_sjcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
n, diag, dx, dr, dd, dz, dh, d1, skp );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );
return MAGMA_SUCCESS;
}
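/* Usage sketch (illustrative): with a Jacobi preconditioner the two-step
   sequence sketched above collapses into a single call, since the scaling
   h = diag .* r is fused into the kernel; diag is assumed to hold the
   inverted diagonal of A, as stated in the parameter description.

       magma_sjcgmerge_xrbeta( n, d1, d2, diag, dx, dr, dd, dz, dh, skp, queue );
       // x += rho*d, r -= rho*z, h = diag.*r, (h,r) -> skp[1], (r,r) -> skp[6],
       // skp[0] = skp[1]/skp[2], d = h + skp[0]*d
*/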
/* -------------------------------------------------------------------------- */
| 577a8817116766731516ab105a43cab408ebca75.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergecg.cu, normal z -> s, Thu Oct 8 23:05:50 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
#if CUDA_VERSION >= 11000
// todo: destroy descriptor and see if the original code descriptors have to be changed
#define cusparseScsrmv(handle, op, rows, cols, nnz, alpha, descr, dval, drow, dcol, x, beta, y) \
{ \
cusparseSpMatDescr_t descrA; \
cusparseDnVecDescr_t descrX, descrY; \
cusparseCreateCsr(&descrA, rows, cols, nnz, \
(void *)drow, (void *)dcol, (void *)dval, \
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, \
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F); \
cusparseCreateDnVec(&descrX, cols, x, CUDA_R_32F); \
cusparseCreateDnVec(&descrY, rows, y, CUDA_R_32F); \
\
size_t bufsize; \
void *buf; \
cusparseSpMV_bufferSize(handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, CUDA_R_32F, CUSPARSE_CSRMV_ALG1, &bufsize); \
if (bufsize > 0) \
magma_malloc(&buf, bufsize); \
cusparseSpMV( handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, CUDA_R_32F, CUSPARSE_CSRMV_ALG1, buf); \
if (bufsize > 0) \
magma_free(buf); \
}
#endif
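// Note: the wrapper above maps the legacy csrmv interface onto the generic
// cusparseSpMV path (CUSPARSE_CSRMV_ALG1) for CUDA 11+: it creates the sparse
// matrix and dense vector descriptors on the fly, queries the workspace size
// with cusparseSpMV_bufferSize, and allocates/frees the external buffer only
// when one is required.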
// These routines merge multiple kernels from smergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
// accelerated reduction for one vector
__global__ void
magma_scgreduce_kernel_spmv1(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
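/* Host-side sketch (illustrative) of the reduction cascade that drives the
   kernel above; it mirrors the while( Gs.x > 1 ) loops of the wrapper
   functions in this file, halving the number of partial sums per pass and
   ping-ponging between the two scratch arrays until a single value remains.

       while ( Gs.x > 1 ) {
           Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
           if ( Gs_next.x == 1 ) Gs_next.x = 2;
           magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
               ( Gs.x, n, aux1, aux2 );
           Gs.x = Gs_next.x/2;
           // swap the roles of aux1 and aux2
       }
       magma_scopyvector( 1, aux1, 1, skp+4, 1, queue );  // final value -> scalar workspace
*/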
// accelerated reduction for two vectors
__global__ void
magma_scgreduce_kernel_spmv2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_scgmerge_spmvcsr_kernel(
int n,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
float dot = MAGMA_S_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
float val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_scgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
float * dval,
magma_index_t * dcolind,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
float val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_scgmerge_spmvell_kernelb1(
int n,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
int idx = threadIdx.x; // local row
int bdx = blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < n ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
float val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * d [ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
/*
if(i < n ) {
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++){
int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
float val = dval[offset+ blocksize * k + threadIdx.x];
if( val != 0){
dot += val*d[col];
}
}
//float dot = MAGMA_S_MAKE(0.0, 0.0);
//for ( int k = 0; k < num_cols_per_row; k++ ) {
// int col = dcolind [ n * k + i ];
// float val = dval [ n * k + i ];
// if( val != 0)
// dot += val * d[ col ];
//}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}*/
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_8(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_16(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_scgmerge_spmvellpackrt_kernel_32(
int n,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
float * d,
float * z,
float * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ float shared[];
if(i < n ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//float val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
float val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_scgmerge_spmvellpackrt_kernel2(
int n,
float * z,
float * d,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_S_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_scgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if(i < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
float val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_scgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * d,
float * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_scg_rhokernel(
float * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
/**
Purpose
-------
    Merges the first SpMV of the CG iteration (supporting the different
    storage formats) with the dot product (d, A*d) and the computation of rho
Arguments
---------
@param[in]
A magma_s_matrix
input matrix
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dd magmaFloat_ptr
input vector d
@param[out]
dz magmaFloat_ptr
input vector z
@param[out]
skp magmaFloat_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_spmv1(
magma_s_matrix A,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
magma_scgmerge_spmvcsr_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
magma_scgmerge_spmvellpack_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
magma_scgmerge_spmvell_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_CUCSR ) {
cusparseHandle_t cusparseHandle = 0;
cusparseMatDescr_t descr = 0;
float c_one = MAGMA_S_ONE;
float c_zero = MAGMA_S_ZERO;
cusparseCreate( &cusparseHandle );
cusparseSetStream( cusparseHandle, queue->cuda_stream() );
cusparseCreateMatDescr( &descr );
cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL );
cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO );
cusparseScsrmv( cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &c_one, descr,
A.dval, A.drow, A.dcol, dd, &c_zero, dz );
cusparseDestroyMatDescr( descr );
cusparseDestroy( cusparseHandle );
cusparseHandle = 0;
descr = 0;
magma_scgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
magma_scgmerge_spmvell_kernelb1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.blocksize,
A.dval, A.dcol, A.drow, dd, dz, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( float( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( float );
if ( A.alignment == 8)
magma_scgmerge_spmvsellpt_kernel_8
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
magma_scgmerge_spmvsellpt_kernel_16
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
magma_scgmerge_spmvsellpt_kernel_32
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_scgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
int num_threads = A.alignment*A.blocksize;
int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( float( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( float );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
magma_scgmerge_spmvellpackrt_kernel_32
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
magma_scgmerge_spmvellpackrt_kernel_16
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
magma_scgmerge_spmvellpackrt_kernel_8
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_scgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+4, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_rhokernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
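// --------------------------------------------------------------------------
// Usage sketch (editorial, not part of MAGMA): within one merged-CG iteration
// the routine above replaces a separate SpMV, dot product and rho update.
// A, d1, d2, dd, dz, skp and queue are assumed to be allocated and
// initialized by the caller as described in the documentation above.
//
//     // z = A*d,  skp[4] = d'*z,  skp[3] = rho,  skp[2] = gamma = beta
//     magma_scgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );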
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_scgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp,
float * vtmp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_scg_alphabetakernel(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
float tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_S_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_scg_d_kernel(
int n,
float * skp,
float * r,
float * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
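// --------------------------------------------------------------------------
// Editorial note: in classical CG notation the two small kernels above
// implement (assuming skp[1] holds the newly reduced r'*r and skp[2] the
// previous one)
//     skp[0] = (r_new'*r_new) / (r_old'*r_old)
//     d      = r + skp[0] * d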
/**
Purpose
-------
    Merges the update of r and x with the dot product r'*r and then performs
    the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in,out]
dx magmaFloat_ptr
                input/output vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_scgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_scgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_scg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dr, dd );
return MAGMA_SUCCESS;
}
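// --------------------------------------------------------------------------
// Usage sketch (editorial): a single call performs x += rho*d, r -= rho*z,
// the reduction of r'*r into skp[1], the scalar update skp[0] = skp[1]/skp[2]
// and the direction update d = r + skp[0]*d. The arguments are assumed to be
// set up by the caller as documented above.
//
//     magma_scgmerge_xrbeta( n, d1, d2, dx, dr, dd, dz, skp, queue );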
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_spcgmerge_xrbeta_kernel(
int n,
float * x,
float * r,
float * d,
float * z,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
}
}
// dot product for multiple vectors
__global__ void
magma_smsdot_one_kernel_1(
int n,
float * v0,
float * w0,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v0[ i ] * v0[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x; the dot products and the update of the
    Krylov vector d are performed separately (see magma_spcgmerge_xrbeta2)
Arguments
---------
@param[in]
n int
dimension n
@param[in,out]
dx magmaFloat_ptr
                input/output vector x
@param[in,out]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta1(
magma_int_t n,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
magma_spcgmerge_xrbeta_kernel<<< Gs, Bs, 0, queue->cuda_stream()>>>
( n, dx, dr, dd, dz, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
/**
Purpose
-------
    Merges the dot products (r'*h and r'*r) with the scalar update and the
    update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
dh magmaFloat_ptr
                input vector h
@param[in]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input/output vector d
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_spcgmerge_xrbeta2(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr dh,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_smsdot_one_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, dr, dh, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_scg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );
return MAGMA_SUCCESS;
}
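// --------------------------------------------------------------------------
// Usage sketch (editorial): in the preconditioned variant the xrbeta step is
// split so that an arbitrary preconditioner can be applied in between; the
// preconditioner solve below is only an assumed placeholder, dh must hold
// M^{-1}*r before the second call.
//
//     magma_spcgmerge_xrbeta1( n, dx, dr, dd, dz, skp, queue );   // x, r update
//     // ... apply preconditioner:  dh = M^{-1} * dr ...
//     magma_spcgmerge_xrbeta2( n, d1, d2, dh, dr, dd, skp, queue );
//     // reduces r'*h -> skp[1] and r'*r -> skp[6], then d = h + skp[0]*d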
/* -------------------------------------------------------------------------- */
// updates x and r
__global__ void
magma_sjcgmerge_xrbeta_kernel(
int n,
float * diag,
float * x,
float * r,
float * d,
float * z,
float * h,
float * vtmp,
float * skp )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
float rho = skp[3];
float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
h[i] = r[i] * diag[i];
}
__syncthreads();
temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
r[ i ] * r[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x with the application of the Jacobi
    preconditioner (h = D^{-1} r), the dot products, and the update of the
    Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloat_ptr
temporary vector
@param[in]
d2 magmaFloat_ptr
temporary vector
@param[in]
diag magmaFloat_ptr
inverse diagonal (Jacobi preconditioner)
@param[in]
dx magmaFloat_ptr
iteration vector x
@param[in]
dr magmaFloat_ptr
input/output vector r
@param[in]
dd magmaFloat_ptr
input vector d
@param[in]
dz magmaFloat_ptr
input vector z
@param[in]
dh magmaFloat_ptr
input vector h
@param[in]
skp magmaFloat_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_ssygpuk
********************************************************************/
extern "C" magma_int_t
magma_sjcgmerge_xrbeta(
magma_int_t n,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr diag,
magmaFloat_ptr dx,
magmaFloat_ptr dr,
magmaFloat_ptr dd,
magmaFloat_ptr dz,
magmaFloat_ptr dh,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( float );
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_sjcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( n, diag, dx, dr, dd, dz, dh, d1, skp );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_scgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_scopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_scg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_scg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );
return MAGMA_SUCCESS;
}
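// --------------------------------------------------------------------------
// Usage sketch (editorial): for the Jacobi-preconditioned CG the x/r/h
// update, both dot products and the direction update collapse into a single
// call; diag is assumed to hold the inverse diagonal of A.
//
//     magma_sjcgmerge_xrbeta( n, d1, d2, diag, dx, dr, dd, dz, dh, skp, queue );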
/* -------------------------------------------------------------------------- */
|
d73a473f30ce056faa9795919381070e711d5905.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
#define SUB_GRID_WIDTH 4
#define SUB_GRID_HEIGHT 4
__shared__ float rowSumation[SUB_GRID_HEIGHT];
__global__
void addMatrixChunk(int *a)
{
float sum = 0;
int start = threadIdx.x * SUB_GRID_WIDTH;
int i = 0;
for (i = start; i < start + SUB_GRID_WIDTH; i++)
{
        sum += a[i];  // accumulate this thread's row of the chunk
}
rowSumation[threadIdx.x] = sum;
__syncthreads();
sum = 0;
for (i = 0; i < SUB_GRID_HEIGHT; i++)
{
sum += rowSumation[i];
}
    float average = sum / (SUB_GRID_HEIGHT * SUB_GRID_WIDTH);  // mean over the whole chunk
for (i = 0; i < SUB_GRID_HEIGHT * SUB_GRID_WIDTH; i++)
{
a[i] = average;
}
}
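// --------------------------------------------------------------------------
// Editorial launch sketch: addMatrixChunk is not exercised by main() below.
// For a single SUB_GRID_HEIGHT x SUB_GRID_WIDTH chunk stored row-major in a
// hypothetical device buffer dev_chunk (16 ints), a matching launch would use
// one thread per row:
//
//     hipLaunchKernelGGL(addMatrixChunk, dim3(1), dim3(SUB_GRID_HEIGHT), 0, 0, dev_chunk);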
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| d73a473f30ce056faa9795919381070e711d5905.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
#define SUB_GRID_WIDTH 4
#define SUB_GRID_HEIGHT 4
__shared__ float rowSumation[SUB_GRID_HEIGHT];
__global__
void addMatrixChunk(int *a)
{
float sum = 0;
int start = threadIdx.x * SUB_GRID_WIDTH;
int i = 0;
for (i = start; i < start + SUB_GRID_WIDTH; i++)
{
        sum += a[i];  // accumulate this thread's row of the chunk
}
rowSumation[threadIdx.x] = sum;
__syncthreads();
sum = 0;
for (i = 0; i < SUB_GRID_HEIGHT; i++)
{
sum += rowSumation[i];
}
    float average = sum / (SUB_GRID_HEIGHT * SUB_GRID_WIDTH);  // mean over the whole chunk
for (i = 0; i < SUB_GRID_HEIGHT * SUB_GRID_WIDTH; i++)
{
a[i] = average;
}
}
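// --------------------------------------------------------------------------
// Editorial launch sketch: addMatrixChunk is not exercised by main() below.
// For a single SUB_GRID_HEIGHT x SUB_GRID_WIDTH chunk stored row-major in a
// hypothetical device buffer dev_chunk (16 ints), a matching launch would use
// one thread per row:
//
//     addMatrixChunk<<<1, SUB_GRID_HEIGHT>>>(dev_chunk);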
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
aaaa0e12ca7bd0654c14e74983df0f37b227e263.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <set>
#include <cstdio>
#include <vector>
#include <algorithm>
#include <hiprand/hiprand_kernel.h>
#include <fstream>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
using namespace std;
#define bucketLimitDecr 600
#define bucketLimitIncr 1400
__device__ int d_count = 0;
__device__ int d_countNew = 0;
__global__ void colourCountFunc (int *colouring, int n, int *propagationArray){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
propagationArray[colouring[i]-1]=1;
}
__global__ void propagationColouringNewest (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int *propagationArray){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
if (propagationArray[i]!=0){
return;
}
int myMax = numbers[i];
// printf("I am node %d with value %d\n", i+1, myMax);
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j]-1;
if (propagationArray[neighbour]==0 && numbers[neighbour] >= myMax){
if (numbers[neighbour] == myMax){
if (i < neighbour){
continue;
}
}
return;
}
}
propagationArray[i]=1;
atomicAdd(&d_countNew, 1);
int colours=0;
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[i];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
for (int j=0; j<colouringLimit-1; j++){
if(bucket[j]){
colours=j+1;
break;
}
}
if (colours >= colouringLimit){
printf("R DANGER DANGER DANGER DANGER DANGER DANGER DANGER\n");
}
if (!colours){
return;
}
colouring[i]=colours;
}
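// --------------------------------------------------------------------------
// Editorial note: d_countNew accumulates one count per vertex whose colour is
// finalised, so the host re-launches the kernel above until the running total
// reaches n, as in main() further below:
//
//     do {
//         hipLaunchKernelGGL(propagationColouringNewest, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
//                            d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, d_propagationArray2);
//         hipMemcpyFromSymbol(h_count, d_countNew, sizeof(int), 0, hipMemcpyDeviceToHost);
//     } while (*h_count < n);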
__global__ void propagationColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *numbers, int *colouring, int *propagationArray1, int *propagationArray2, int size){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=size){
return;
}
int me = propagationArray1[i];
int myMax = numbers[me-1];
int start = -1;
int stop = -1;
start = vertexArray[me-1];
stop = vertexArray[me];
int max = true;
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
if (neighbourArray[j]==0){
continue;
}
int neighbour = neighbourArray[j]-1;
if (numbers[neighbour] >= myMax){
if (numbers[neighbour] == myMax){
if ((me-1) < neighbour){
continue;
}
}
max = false;
break;
}
}
if (!max){
propagationArray2[atomicAdd(&d_count, 1)]=me;
return;
}
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[me-1];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
    for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
    for (int j=0; j<colouringLimit-1; j++){
if(bucket[j]){
colouring[me-1]=j+1;
return;
}
}
}
__global__ void propagationColouring (int *vertexArray, int *neighbourArray, int n, int m, int *numbers, int *colouring, int *propagationArray){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n){
return;
}
if (propagationArray[i]==0){
return;
}
int myValue = numbers[i];
// printf("I am node %d with value %d and colouring %d\n", i+1, myValue, colouring[i]);
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j];
if (neighbour==0){
continue;
}
neighbour--;
if (propagationArray[neighbour]==1 && numbers[neighbour] >= myValue){
if (numbers[neighbour] == myValue){
if (i < neighbour){
continue;
}
}
return;
}
}
propagationArray[i]=0;
printf("", propagationArray[i]);
// printf("\n%d\n", i+1);
int colours=0;
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[i];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
for (int j=0; j<colouringLimit-1; j++){
if(bucket[j]){
colours=j+1;
break;
}
}
if (colours >= colouringLimit){
printf("R DANGER DANGER DANGER DANGER DANGER DANGER DANGER\n");
}
if (!colours){
return;
}
colouring[i]=colours;
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
propagationArray[neighbourArray[j]-1]=1;
}
}
__global__ void decrementalColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *decrementalArray, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= size){
return;
}
int startStart, startStop;
int me, you;
// int otheri;
// bool ipercent2 = false;
me = decrementalArray[i];
if (i%2 == 0){
you = decrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
}
else{
you = decrementalArray[i-1];
// otheri = i-1;
}
//printf("I am %d and I am deleting %d - %d\n", i, me, you);
startStart = vertexArray[me-1];
startStop = vertexArray[me];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==you){
neighbourArray[j]=0;
break;
}
}
}
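// --------------------------------------------------------------------------
// Editorial note: decrementalArray is expected as edge-endpoint pairs
// [u0, v0, u1, v1, ...] (see how h_incrementalArray is filled in main());
// thread 2k removes v_k from u_k's adjacency list and thread 2k+1 removes
// u_k from v_k's list, so both directions of each undirected edge are zeroed.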
__global__ void decrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end){
int i = threadIdx.x;
int startStart, startStop;
int me, you;
if (i==0){
me = start;
you = end;
// printf("I am %d and my me is %d and my you is %d", i, me, you);
}
else{
me = end;
you = start;
// printf("I am %d and my me is %d and my you is %d", i, me, you);
}
startStart = vertexArray[me-1];
// printf("I am %d and my startStart is %d", i, startStart);
if (me==n){
startStop = 2*m;
}
else{
startStop = vertexArray[me];
}
// printf("I am %d and my startStop is %d", i, startStop);
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==you){
neighbourArray[j]=0;
break;
}
}
__syncthreads();
int colours=0;
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[me-1];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
    for (int j=0; j<colouringLimit-1; j++){
if(bucket[j]){
colours=j+1;
break;
}
}
if (!colours){
return;
}
colouring[me-1]=colours;
}
__global__ void incrementalColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours, int *coloursSecond){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= incrementalCount){
return;
}
int startStart, startStop;
int me, you;
int otheri;
bool ipercent2 = false;
me = incrementalArray[i];
if (i%2 == 0){
you = incrementalArray[i+1];
otheri = i+1;
ipercent2 = true;
}
else{
you = incrementalArray[i-1];
otheri = i-1;
}
startStart = vertexArray[me-1];
startStop = vertexArray[me];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=you;
break;
}
}
__syncthreads();
if (colouring[me-1]!=colouring[you-1]){
return;
}
// if (i==0)
// printf("%d and %d Conflict\n", start, end);
colours[i]=0;
coloursSecond[i]=0;
// if (i==0)
// printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]);
bool bucket[bucketLimitIncr];
for (int j=0; j<maxColour; j++){
bucket[j]=true;
}
// if (i==0){
// printf("%d %d", startStart, startStop);
//
// for (int j=startStart; j<startStop; j++){
//
// printf("clo %d\n", neighbourArray[j]);
//
// if (neighbourArray[j]!=0){
// printf("clocli %d\n", colouring[neighbourArray[j]-1]);
// }
// }
// }
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
bucket[colouring[neighbourArray[j]-1]-1] = false;
// if (i==0)
// printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]);
}
// for (int j=0; j<maxColour; j++){
// if(bucket[j]){
// colours[i]=j+1;
//// printf("%d ashhas \t", j+1);
// break;
// }
// }
//
bool first = true;
for (int j=0; j<maxColour; j++){
if(bucket[j]){
if (first){
colours[i]=j+1;
first = false;
}
// printf("%d ashhas \t", j+1);
else{
coloursSecond[i]=j+1;
break;
}
}
}
// if (i==0)
// for (int j=0; j<maxColour; j++){
// printf("%d \t",bucket[j]);
// }
// if (i==0){
// printf("\n");
// }
__syncthreads();
//__threadfence();
// printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]);
// Possible issue: There could be a number inbetween the smallest equal guess and the current colour.
//printf("", colours[otheri]); ?????????????
// printf("colours[%d] = %d\n", i, colours[i]);
// printf("coloursOthers[%d] = %d\n", otheri, colours[otheri]);
//colours[otheri]+=0;
if (colours[i]==colours[otheri]){
if (colours[i]<colouring[me-1]){
if (coloursSecond[i] < coloursSecond[otheri]){
if (coloursSecond[i] < colouring[me-1]){
colouring[me-1]=coloursSecond[i];
}
}
else if (coloursSecond[i] == coloursSecond[otheri]) {
if (ipercent2){
colouring[me-1]=colours[i];
}
else{
if (coloursSecond[i] < colouring[me-1]){
colouring[me-1]=coloursSecond[i];
}
}
}
else{
colouring[me-1]=colours[i];
}
}
else{
if (!ipercent2){
colouring[me-1]=colours[i];
}
}
}
// if (colours[i]==colours[otheri]){
//// printf("if\n");
// if (colours[i]<colouring[me-1]){
// if(ipercent2){
// colouring[me-1]=colours[i];
// }
// }
//
// else{
// if (!ipercent2){
// colouring[me-1]=colours[i];
// }
// }
// }
else{
// printf("else\n");
if (colours[i]<colouring[me-1]){
colouring[me-1]=colours[i];
}
else{
if (colours[i]<colours[otheri]){
colouring[me-1]=colours[i];
}
}
}
// __syncthreads();
//
// if (i==0){
// for (int j=0; j<n; j++){
// printf("%d ", vertexArray[j]);
// }
// printf("\n");
//
// for (int j=0; j<m; j++){
// printf("%d ", neighbourArray[j]);
// }
// printf("\n");
// }
// if (i==0){
// for (int j=0; j<n; j++){
// printf("%d ", colouring[j]);
// }
// printf("\n");
// }
//
}
//__global__ void incrementalColouringNewP1 (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours){
//
// int i = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (i >= incrementalCount){
// return;
// }
//
// int startStart, startStop;
// int me, you;
// int otheri;
// bool ipercent2 = false;
//
// me = incrementalArray[i];
//
// if (i%2 == 0){
// you = incrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
// }
// else{
// you = incrementalArray[i-1];
// otheri = i-1;
// }
//
// startStart = vertexArray[me-1];
//
// if (me==n){
// startStop = 2*m;
// }
//
// else{
// startStop = vertexArray[me];
// }
//
// for (int j=startStart; j<startStop; j++){
// if (neighbourArray[j]==0){
// neighbourArray[j]=you;
// break;
// }
// }
//
//}
//__global__ void incrementalColouringNewP2 (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours){
//
// int i = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (i >= incrementalCount){
// return;
// }
//
// int startStart, startStop;
// int me, you;
// int otheri;
// bool ipercent2 = false;
//
// me = incrementalArray[i];
//
// if (i%2 == 0){
// you = incrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
// }
// else{
// you = incrementalArray[i-1];
// otheri = i-1;
// }
//
// colours[i]=0;
//
//
// bool bucket[bucketLimitIncr];
//
// for (int j=0; j<maxColour; j++){
// bucket[j]=true;
// }
//
// for (int j=startStart; j<startStop; j++){
// if (neighbourArray[j]==0){
// continue;
// }
//
// bucket[colouring[neighbourArray[j]-1]-1] = false;
// }
//
// for (int j=0; j<maxColour; j++){
// if(bucket[j]){
// colours[i]=j+1;
// break;
// }
// }
//
//}
//__global__ void incrementalColouringNewP3 (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours){
//
// int i = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (i >= incrementalCount){
// return;
// }
//
// int startStart, startStop;
// int me, you;
// int otheri;
// bool ipercent2 = false;
//
// me = incrementalArray[i];
//
// if (i%2 == 0){
// you = incrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
// }
// else{
// you = incrementalArray[i-1];
// otheri = i-1;
// }
//
// if (colouring[me-1]!=colouring[you-1]){
// return;
// }
//
// if (colours[i]==colours[otheri]){
// printf("if\n");
// if (colours[i]<colouring[me-1]){
// if(ipercent2){
// colouring[me-1]=colours[i];
// }
// }
//
// else{
// if (!ipercent2){
// colouring[me-1]=colours[i];
// }
// }
// }
//
// else{
// printf("else\n");
// if (colours[i]<colouring[me-1]){
// colouring[me-1]=colours[i];
// }
//
// else{
// if (colours[i]<colours[otheri]){
// colouring[me-1]=colours[i];
// }
// }
// }
//
//
//}
__global__ void incrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end, int maxColour){
int i = threadIdx.x;
int startStart, startStop;
int me, you;
if (i==0){
me = start;
you = end;
}
else{
me = end;
you = start;
}
startStart = vertexArray[me-1];
if (me==n){
startStop = 2*m;
}
else{
startStop = vertexArray[me];
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=you;
break;
}
}
__syncthreads();
if (colouring[start-1]!=colouring[end-1]){
return;
}
// if (i==0)
// printf("%d and %d Conflict\n", start, end);
__shared__ int colours[2];
colours[i]=0;
// if (i==0)
// printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]);
bool bucket[bucketLimitIncr];
for (int j=0; j<maxColour; j++){
bucket[j]=true;
}
// if (i==0){
// printf("%d %d", startStart, startStop);
//
// for (int j=startStart; j<startStop; j++){
//
// printf("clo %d\n", neighbourArray[j]);
//
// if (neighbourArray[j]!=0){
// printf("clocli %d\n", colouring[neighbourArray[j]-1]);
// }
// }
// }
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
bucket[colouring[neighbourArray[j]-1]-1] = false;
// if (i==0)
// printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]);
}
for (int j=0; j<maxColour; j++){
if(bucket[j]){
colours[i]=j+1;
// printf("%d ashhas \t", j+1);
break;
}
}
// if (i==0)
// for (int j=0; j<maxColour; j++){
// printf("%d \t",bucket[j]);
// }
// if (i==0){
// printf("\n");
// }
__syncthreads();
// printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]);
// Possible issue: There could be a number inbetween the smallest equal guess and the current colour.
if (colours[i]==colours[1-i]){
if (colours[i]<colouring[me-1]){
if(i==0){
colouring[me-1]=colours[i];
}
}
else{
if (i==1){
colouring[me-1]=colours[i];
}
}
}
else{
if (colours[i]<colouring[me-1]){
colouring[me-1]=colours[i];
}
else{
if (colours[i]<colours[1-i]){
colouring[me-1]=colours[i];
}
}
}
__syncthreads();
// if (i==0){
// for (int j=0; j<n; j++){
// printf("%d ", colouring[j]);
// }
// printf("\n");
// }
}
__global__ void colourMinMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
if (colouring[i]!=0){
return;
}
int myValue = numbers[i];
// printf("I am node %d with value %d\n", i+1, myMax);
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
bool max = true, min = true;
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j];
if (neighbour==0){
continue;
}
neighbour--;
if (max && colouring[neighbour]==0 && numbers[neighbour] >= myValue){
if (numbers[neighbour] == myValue){
if (i < neighbour){
continue;
}
}
max=false;
if (!min){
return;
}
}
if (min && colouring[neighbour]==0 && numbers[neighbour] <= myValue){
if (numbers[neighbour] == myValue){
if (i > neighbour){
continue;
}
}
min=false;
if (!max){
return;
}
}
}
if (max){
colouring[i] = currentColour;
}
else if (min){
colouring[i] = currentColour+1;
}
atomicAdd(&d_count, 1);
}
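// --------------------------------------------------------------------------
// Editorial host-side sketch: per sweep, colourMinMax colours the local
// maxima of the random numbering with currentColour and the local minima with
// currentColour+1, counting coloured vertices in d_count. main() below
// re-launches it with the colour advanced by 2 until all n vertices are done:
//
//     int colourCount = 1;
//     while (1) {
//         hipLaunchKernelGGL(colourMinMax, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
//                            d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
//         hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost);
//         if (*h_count == n) break;
//         colourCount += 2;
//     }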
__global__ void setup_kernel (hiprandState_t * state, unsigned long seed ){
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init (seed, i, 0, &state[i]);
}
__global__ void randomNumbering (hiprandState_t* globalState, int *degreeCount, int n, int limit){
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t localState = globalState[i];
float RANDOM = hiprand_uniform( &localState );
globalState[i] = localState;
RANDOM *= (limit - 1 + 0.999999);
RANDOM += 1;
degreeCount[i] = (int) RANDOM;
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
stop = vertexArray[i+1];
diff = stop-start;
degreeCount[i]=diff;
}
void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
for (int i=0; i<n-1; i++){
for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){
cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
for (int j = vertexArray[n-1]; j < m; ++j)
{
cout<<"e "<<n<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
void addEdge (int *vertexArray, int *neighbourArray, int n, int m, int start, int stop, int &lastStart, int &lastStop){
int startStart, startStop;
int stopStart, stopStop;
startStart = vertexArray[start-1];
stopStart = vertexArray[stop-1];
startStop = vertexArray[start];
stopStop = vertexArray[stop];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=stop;
lastStart = j;
break;
}
}
for (int j=stopStart; j<stopStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=start;
lastStop = j;
break;
}
}
}
void deleteEdge (int *neighbourArray, int lastStart, int lastStop){
neighbourArray[lastStart]=0;
neighbourArray[lastStop]=0;
}
bool isPermissible (int *incrementalArray, int incrementalCount, int *vertexArray, int *neighbourArray, int n, int m, int start, int stop){
int lastStart = 0, lastStop = 0;
addEdge ( vertexArray, neighbourArray, n, m, start, stop, lastStart, lastStop );
for (int i=0; i<incrementalCount; i++){
if (incrementalArray[i] == start || incrementalArray[i] == stop){
deleteEdge(neighbourArray, lastStart, lastStop);
return false;
}
int startStart, startStop;
startStart = vertexArray[incrementalArray[i]-1];
startStop = vertexArray[incrementalArray[i]];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
if (neighbourArray[j]==start || neighbourArray[j]==stop){
deleteEdge(neighbourArray, lastStart, lastStop);
return false;
}
}
}
return true;
}
int main(int argc, char const *argv[])
{
int n, m;
cin>>n>>m;
ofstream fout;
fout.open("output4.txt",ios::app);
double rLimit = 1-(30000.0/m);
if (m < 900000){
rLimit = 0.5;
}
int h_maxColour;
int *h_count = new int;
int *h_vertexArray = new int [n+1];
int *h_neighbourArray = new int [2*m];
int *h_degreeCount = new int [n];
int *h_colour = new int [n];
int *h_propagationArray1 = new int [n];
int *h_propagationArray2 = new int [n];
int *d_vertexArray = NULL;
hipMalloc((void **)&d_vertexArray, (n+1)*sizeof(int));
int *d_neighbourArray = NULL;
hipMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));
int *d_colour = NULL;
hipMalloc((void **)&d_colour, (n)*sizeof(int));
hipMemset((void *)d_colour, 0, (n)*sizeof(int));
int *d_propagationArray1 = NULL;
hipMalloc((void **)&d_propagationArray1, (1400)*sizeof(int));
hipMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
int *d_propagationArray2 = NULL;
hipMalloc((void **)&d_propagationArray2, (n)*sizeof(int));
hipMemset((void *)d_propagationArray2, 0, (n)*sizeof(int));
int *d_degreeCount = NULL;
hipMalloc((void **)&d_degreeCount, (n)*sizeof(int));
hipMemset((void *)d_degreeCount, 0, (n)*sizeof(int));
hiprandState_t* devStates;
hipMalloc ( &devStates, n*sizeof( hiprandState_t ) );
int offset = 0;
vector<int> startArray, stopArray;
cin>>h_maxColour;
for (int i = 0; i < n; ++i)
{
h_vertexArray[i]=offset;
int degree;
cin>>degree;
offset+=degree;
h_propagationArray1[i]=0;
}
h_vertexArray[n]=2*m;
for (int i = 0; i < 2*m; ++i)
{
h_neighbourArray[i]=0;
}
for (int i = 0; i < m; ++i)
{
int start;
int end;
cin>>start>>end;
double r = ((double) rand() / (RAND_MAX));
int startStart, startStop, stopStart, stopStop;
startStart = h_vertexArray[start-1];
startStop = h_vertexArray[start];
stopStart = h_vertexArray[end-1];
stopStop = h_vertexArray[end];
for (int j=startStart; j<startStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=end;
break;
}
}
for (int j=stopStart; j<stopStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=start;
break;
}
}
if (r>rLimit){
startArray.push_back(start);
stopArray.push_back(end);
}
}
// for (int i=0; i<n+1; i++){
// cout<<h_vertexArray[i]<<" ";
// }
//
// cout<<endl;
//
// for (int i=0; i<2*m; i++){
// cout<<h_neighbourArray[i]<<" ";
// }
//
// cout<<endl;
hipEvent_t start, stop;
float timeNew;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMemcpy(d_vertexArray, h_vertexArray, (n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
int threadsPerBlock = 512;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( setup_kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, time(NULL) );
hipLaunchKernelGGL(( randomNumbering), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, d_degreeCount, n, n);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
// hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost);
// cout<<"Random numbers: "<<endl;
//
// for (int i=0; i<n; i++){
// cout<<h_degreeCount[i]<<endl;
// }
int colourCount = 1;
// cout<<"Worklist: "<<endl;
//
// for (int i=0; i<startArray.size(); i++){
// cout<<startArray[i]<<" "<<stopArray[i]<<endl;
// }
hipEventRecord(start, 0);
while (1){
hipLaunchKernelGGL(( colourMinMax), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost);
// cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl;
if (*h_count == n){
break;
}
colourCount+=2;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
colourCount++;
thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour);
int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
fout<<maxColour<<"\t";
int maxColourNew;
thrust::device_ptr<int> d_propagationArray_ptr = thrust::device_pointer_cast(d_propagationArray1);
maxColourNew = 0;
hipLaunchKernelGGL(( colourCountFunc), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_colour, n, d_propagationArray1);
maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + 1400);
hipMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
fout<<maxColourNew<<"\t";
// hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
cout<<"Size: "<<startArray.size()<<endl;
fout<<startArray.size()<<"\t";
int *d_incrementalArray = NULL;
hipMalloc((void **)&d_incrementalArray, 2*startArray.size()*sizeof(int));
// int *d_colours = NULL;
// hipMalloc((void **)&d_colours, 1024*sizeof(int));
//
// int *d_coloursSecond = NULL;
// hipMalloc((void **)&d_coloursSecond, 1024*sizeof(int));
//
int *h_incrementalArray = new int [2*startArray.size()];
//
// vector<bool> marked (startArray.size(), false);
//
// int incrementalCount = 0;
//
//// hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
////
////
//// cout<<"Colour numbers: "<<endl;
////
////
////
//// for (int i=0; i<n; i++){
//// cout<<h_colour[i]<<endl;
//// }
// int printCount = 0;
//
// hipEventRecord(start, 0);
//
//
// for (int i=0; i<startArray.size(); i++){
// if (marked[i]){
// continue;
// }
//
// int lastStart, lastStop;
//
// incrementalCount = 0;
//
// addEdge(h_vertexArray, h_neighbourArray, n, m, startArray[i], stopArray[i], lastStart, lastStop);
//
// marked[i]=true;
//
// h_incrementalArray[incrementalCount] = startArray[i];
// h_incrementalArray[incrementalCount+1] = stopArray[i];
//
// incrementalCount+=2;
//
// for (int j = i+1; j<startArray.size(); j++){
// if (marked[j]){
// continue;
// }
//
// if (isPermissible (h_incrementalArray, incrementalCount, h_vertexArray, h_neighbourArray, n, m, startArray[j], stopArray[j])){
// marked[j]=true;
// h_incrementalArray[incrementalCount] = startArray[j];
// h_incrementalArray[incrementalCount+1] = stopArray[j];
//
// incrementalCount+=2;
//
// if (incrementalCount == 1024){
// break;
// }
// }
// }
//
//// for (int j=0; j<incrementalCount; j++){
//// cout<<h_incrementalArray[j]<<" ";
//// }
//// cout<<endl;
// int threadsPerBlockIncremental=1024;
// int blocksPerGridIncremental = (incrementalCount + threadsPerBlockIncremental -1)/threadsPerBlockIncremental;
//
// if (blocksPerGridIncremental!=1){
// cout<<"DANGER DANGER DANGER DANGER DANGER DANGER DANGER DANGER"<<endl;
// }
//
// hipMemcpy(d_incrementalArray, h_incrementalArray, incrementalCount*sizeof(int), hipMemcpyHostToDevice);
//
//// cout<<incrementalCount<<endl;
// incrementalColouringNew<<<blocksPerGridIncremental, threadsPerBlockIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, 1400, d_colours, d_coloursSecond);
// printCount++;
//// incrementalColouringNewP1<<<threadsPerBlock, blocksPerGridIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, h_maxColour, d_colours);
//// incrementalColouringNewP2<<<threadsPerBlock, blocksPerGridIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, h_maxColour, d_colours);
//// incrementalColouringNewP3<<<threadsPerBlock, blocksPerGridIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, h_maxColour, d_colours);
//
//
//// hipDeviceSynchronize();
//
//// hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
////
////
//// cout<<"Colour numbers: "<<endl;
////
////
////
//// for (int i=0; i<n; i++){
//// cout<<h_colour[i]<<endl;
//// }
////
//
//
// }
//
//
// hipEventRecord(stop, 0);
// hipEventSynchronize(stop);
//
// hipEventElapsedTime(&timeNew, start, stop);
//
// fout<<timeNew<<"\t";
//
// fout<<printCount<<"\t";
//
// hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
// for (int i=0; i<startArray.size(); i++){
//
// int startStart = h_vertexArray[startArray[i]-1];
// int stopStart = h_vertexArray[stopArray[i]-1];
// int startStop = 0;
// int stopStop = 0;
//
// if (startArray[i]==n){
// startStop = 2*m;
// }
//
// else{
// startStop = h_vertexArray[startArray[i]];
// }
//
// if (stopArray[i]==n){
// stopStop = 2*m;
// }
//
// else{
// stopStop = h_vertexArray[stopArray[i]];
// }
//
// for (int j=startStart; j<startStop; j++){
// if (neighbourArray[j]==0){
// neighbourArray[j]=stopArray[i];
// break;
// }
// }
//
// for (int j=stopStart; j<stopStop; j++){
// if (neighbourArray[j]==0){
// neighbourArray[j]=startArray[i];
// break;
// }
// }
//
// //cout<<"New added edge: "<<startArray[i]<<" "<<stopArray[i]<<endl;
// if (incrementalCount == 0){
// h_incrementalArray[incrementalCount] = startArray[i];
// h_incrementalArray[incrementalCount+1] = stopArray[i];
//
// incrementalCount+=2;
// }
//
// else{
// for (int j=0; j<incrementalCount; j++){
//
// startStart = h_vertexArray[h_incrementalArray[j]];
// startStop = 0;
//
// if (h_incrementalArray[j]==n){
// startStop = 2*m;
// }
//
// else{
// startStop = h_vertexArray[h_incrementalArray[j]];
// }
// }
// }
//
//
//
// incrementalColouring<<<1, 2>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, startArray[i], stopArray[i], h_maxColour);
//
// hipDeviceSynchronize();
//
// }
//
// set<int> tempSet;
// set<int>::iterator it;
for (int i=0; i<startArray.size(); i++){
h_incrementalArray[2*i]=startArray[i];
h_incrementalArray[2*i+1]=stopArray[i];
// h_propagationArray1[startArray[i]-1]=1;
// h_propagationArray1[stopArray[i]-1]=1;
//
// tempSet.insert(startArray[i]);
// tempSet.insert(stopArray[i]);
}
// for (int i=0; i<tempSet.size(); i++){
// h_propagationArray[i] = tempSet[i];
// }
// cout<<"Decremental Array:"<<endl;
//
//
// for (int i=0; i<startArray.size(); i++){
//
// cout<<h_incrementalArray[2*i]<<" "<<h_incrementalArray[2*i+1]<<endl;
//
// }
// hipMemcpy(h_vertexArray, d_vertexArray, (n+1)*sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), hipMemcpyDeviceToHost);
//
// for (int i=0; i<(n+1); i++){
// cout<<h_vertexArray[i]<<" ";
// }
//
// cout<<endl;
//
// for (int i=0; i<2*m; i++){
// cout<<h_neighbourArray[i]<<" ";
// }
//
// cout<<endl;
hipEventRecord(start, 0);
hipMemcpy(d_incrementalArray, h_incrementalArray, 2*startArray.size()*sizeof(int), hipMemcpyHostToDevice);
int blocksPerGridDecremental = (2*startArray.size() + threadsPerBlock -1)/threadsPerBlock;
hipLaunchKernelGGL(( decrementalColouringNew), dim3(blocksPerGridDecremental), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, n, m, d_incrementalArray, 2*startArray.size());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
// hipDeviceSynchronize();
// hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
//
// hipMemcpy(d_propagationArray1, h_propagationArray1, n*sizeof(int), hipMemcpyHostToDevice);
// hipMemcpy(d_propagationArray1, h_propagationArray, tempSet.size()*sizeof(int), hipMemcpyHostToDevice);
// bool flip = true;
//
// int blocksPerGridPropagation = (n + threadsPerBlock -1)/threadsPerBlock;
//
// while (1){
// hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost);
//
// if (*h_count == 0){
// break;
// }
//
// blocksPerGridPropagation = (*h_count + threadsPerBlock -1)/threadsPerBlock;
//
// if (flip){
// flip = false;
// }
//
// else{
// flip = true;
// }
//
//
// }
//
//hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
// cout<<"{ ";
//
// for (int i=0; i<n; i++){
// if (h_propagationArray1[i]!=0){
// cout<<i+1<<" ";
// }
// }
//
// cout<<"}"<<endl;
maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
fout<<maxColour<<"\t";
maxColourNew = 0;
hipLaunchKernelGGL(( colourCountFunc), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_colour, n, d_propagationArray1);
maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + 1400);
hipMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
fout<<maxColourNew<<"\t";
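// Timed propagation phase: relaunch the re-colouring kernel until every vertex has been finalised (d_countNew reaches n).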
hipEventRecord(start, 0);
while (1){
hipLaunchKernelGGL(( propagationColouringNewest), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, d_propagationArray2);
hipMemcpyFromSymbol(h_count, d_countNew, sizeof(int), 0, hipMemcpyDeviceToHost);
// cout<<"H Count = "<<*h_count<<endl;
if (*h_count == n){
break;
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
fout<<maxColour<<"\t";
maxColourNew = 0;
hipLaunchKernelGGL(( colourCountFunc), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_colour, n, d_propagationArray1);
maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + 1400);
hipMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
fout<<maxColourNew<<"\t";
// int countPropagation = 0;
// thrust::device_ptr<int> d_propagationArray_ptr = thrust::device_pointer_cast(d_propagationArray1);
// do{
//
// propagationColouring<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, n, m, d_degreeCount, d_colour, d_propagationArray1);
//
// hipDeviceSynchronize();
//
// countPropagation = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + n);
//
// cout<<countPropagation<<endl;
// hipMemcpy(h_propagationArray1, d_propagationArray1, n*sizeof(int), hipMemcpyDeviceToHost);
//
//// cout<<"{ ";
////
//// for (int i=0; i<n; i++){
//// if (h_propagationArray1[i]!=0){
//// cout<<i+1<<" ";
//// }
//// }
////
//// cout<<"}"<<endl;
//
//// hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
//// cout<<"Colour numbers: "<<endl;
////
////
////
//// for (int i=0; i<n; i++){
//// cout<<h_colour[i]<<endl;
//// }
//
// }while (countPropagation);
//
// cout<<"Shamil "<<printCount<<endl;
hipEventRecord(start, 0);
hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\n";
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
// cout<<"Time for the kernel: "<<time<<" ms"<<endl;
// hipMemcpy(h_vertexArray, d_vertexArray, (n+1)*sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), hipMemcpyDeviceToHost);
// for (int i=0; i<n+1; i++){
// cout<<h_vertexArray[i]<<" ";
// }
//
// cout<<endl;
//
// for (int i=0; i<2*m; i++){
// cout<<h_neighbourArray[i]<<" ";
// }
//
// cout<<endl;
delete h_count;
delete[] h_vertexArray;
delete[] h_neighbourArray;
delete[] h_degreeCount;
delete[] h_colour;
delete[] h_propagationArray1;
delete[] h_propagationArray2;
delete[] h_incrementalArray;
hipFree(d_neighbourArray);
hipFree(d_vertexArray);
hipFree(d_degreeCount);
hipFree(d_colour);
hipFree(d_propagationArray1);
hipFree(d_propagationArray2);
hipFree(d_incrementalArray);
hipFree(devStates);
fout.close();
hipDeviceReset();
return 0;
}
| aaaa0e12ca7bd0654c14e74983df0f37b227e263.cu | #include <iostream>
#include <cstdlib>
#include <set>
#include <cstdio>
#include <vector>
#include <algorithm>
#include <curand_kernel.h>
#include <fstream>
#include <ctime> // for time(NULL), used to seed the cuRAND states
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
using namespace std;
#define bucketLimitDecr 600
#define bucketLimitIncr 1400
__device__ int d_count = 0;
__device__ int d_countNew = 0;
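// Flags every colour value that appears in the colouring; a reduction over propagationArray then gives the number of distinct colours in use.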
__global__ void colourCountFunc (int *colouring, int n, int *propagationArray){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
propagationArray[colouring[i]-1]=1;
}
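// Finalises a vertex once it is a local maximum of the random numbers among its not-yet-finalised neighbours, then lowers its colour to the smallest value below its current colour that no neighbour uses (if one exists).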
__global__ void propagationColouringNewest (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int *propagationArray){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
if (propagationArray[i]!=0){
return;
}
int myMax = numbers[i];
// printf("I am node %d with value %d\n", i+1, myMax);
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
if (neighbourArray[j]==0){
continue;
}
int neighbour = neighbourArray[j]-1;
if (propagationArray[neighbour]==0 && numbers[neighbour] >= myMax){
if (numbers[neighbour] == myMax){
if (i < neighbour){
continue;
}
}
return;
}
}
propagationArray[i]=1;
atomicAdd(&d_countNew, 1);
int colours=0;
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[i];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
for (int j=0; j<colouringLimit-1; j++){
if(bucket[j]){
colours=j+1;
break;
}
}
if (colours >= colouringLimit){
printf("R DANGER DANGER DANGER DANGER DANGER DANGER DANGER\n");
}
if (!colours){
return;
}
colouring[i]=colours;
}
__global__ void propagationColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *numbers, int *colouring, int *propagationArray1, int *propagationArray2, int size){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i>=size){
return;
}
int me = propagationArray1[i];
int myMax = numbers[me-1];
int start = -1;
int stop = -1;
start = vertexArray[me-1];
stop = vertexArray[me];
int max = true;
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
if (neighbourArray[j]==0){
continue;
}
int neighbour = neighbourArray[j]-1;
if (numbers[neighbour] >= myMax){
if (numbers[neighbour] == myMax){
if ((me-1) < neighbour){
continue;
}
}
max = false;
break;
}
}
if (!max){
propagationArray2[atomicAdd(&d_count, 1)]=me;
return;
}
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[me-1];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
for (int j=0; j<colouringLimit; j++){
if(bucket[j]){
colouring[me-1]=j+1;
return;
}
}
}
__global__ void propagationColouring (int *vertexArray, int *neighbourArray, int n, int m, int *numbers, int *colouring, int *propagationArray){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n){
return;
}
if (propagationArray[i]==0){
return;
}
int myValue = numbers[i];
// printf("I am node %d with value %d and colouring %d\n", i+1, myValue, colouring[i]);
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j];
if (neighbour==0){
continue;
}
neighbour--;
if (propagationArray[neighbour]==1 && numbers[neighbour] >= myValue){
if (numbers[neighbour] == myValue){
if (i < neighbour){
continue;
}
}
return;
}
}
propagationArray[i]=0;
printf("", propagationArray[i]);
// printf("\n%d\n", i+1);
int colours=0;
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[i];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
for (int j=0; j<colouringLimit-1; j++){
if(bucket[j]){
colours=j+1;
break;
}
}
if (colours >= colouringLimit){
printf("R DANGER DANGER DANGER DANGER DANGER DANGER DANGER\n");
}
if (!colours){
return;
}
colouring[i]=colours;
for (int j=start; j<stop; j++){
if (neighbourArray[j]==0){
continue;
}
propagationArray[neighbourArray[j]-1]=1;
}
}
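// Deletes the edges stored in decrementalArray as consecutive endpoint pairs: each thread zeroes the adjacency-list entry that points from its endpoint to the paired endpoint.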
__global__ void decrementalColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *decrementalArray, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= size){
return;
}
int startStart, startStop;
int me, you;
// int otheri;
// bool ipercent2 = false;
me = decrementalArray[i];
if (i%2 == 0){
you = decrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
}
else{
you = decrementalArray[i-1];
// otheri = i-1;
}
//printf("I am %d and I am deleting %d - %d\n", i, me, you);
startStart = vertexArray[me-1];
startStop = vertexArray[me];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==you){
neighbourArray[j]=0;
break;
}
}
}
__global__ void decrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end){
int i = threadIdx.x;
int startStart, startStop;
int me, you;
if (i==0){
me = start;
you = end;
// printf("I am %d and my me is %d and my you is %d", i, me, you);
}
else{
me = end;
you = start;
// printf("I am %d and my me is %d and my you is %d", i, me, you);
}
startStart = vertexArray[me-1];
// printf("I am %d and my startStart is %d", i, startStart);
if (me==n){
startStop = 2*m;
}
else{
startStop = vertexArray[me];
}
// printf("I am %d and my startStop is %d", i, startStop);
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==you){
neighbourArray[j]=0;
break;
}
}
__syncthreads();
int colours=0;
bool bucket[bucketLimitDecr];
int colouringLimit = colouring[me-1];
for (int j=0; j<colouringLimit-1; j++){
bucket[j]=true;
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
int bucketIndex = colouring[neighbourArray[j]-1];
if (bucketIndex < colouringLimit){
bucket[bucketIndex-1] = false;
}
}
for (int j=0; j<colouringLimit; j++){
if(bucket[j]){
colours=j+1;
break;
}
}
if (!colours){
return;
}
colouring[me-1]=colours;
}
__global__ void incrementalColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours, int *coloursSecond){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= incrementalCount){
return;
}
int startStart, startStop;
int me, you;
int otheri;
bool ipercent2 = false;
me = incrementalArray[i];
if (i%2 == 0){
you = incrementalArray[i+1];
otheri = i+1;
ipercent2 = true;
}
else{
you = incrementalArray[i-1];
otheri = i-1;
}
startStart = vertexArray[me-1];
startStop = vertexArray[me];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=you;
break;
}
}
__syncthreads();
if (colouring[me-1]!=colouring[you-1]){
return;
}
// if (i==0)
// printf("%d and %d Conflict\n", start, end);
colours[i]=0;
coloursSecond[i]=0;
// if (i==0)
// printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]);
bool bucket[bucketLimitIncr];
for (int j=0; j<maxColour; j++){
bucket[j]=true;
}
// if (i==0){
// printf("%d %d", startStart, startStop);
//
// for (int j=startStart; j<startStop; j++){
//
// printf("clo %d\n", neighbourArray[j]);
//
// if (neighbourArray[j]!=0){
// printf("clocli %d\n", colouring[neighbourArray[j]-1]);
// }
// }
// }
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
bucket[colouring[neighbourArray[j]-1]-1] = false;
// if (i==0)
// printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]);
}
// for (int j=0; j<maxColour; j++){
// if(bucket[j]){
// colours[i]=j+1;
//// printf("%d ashhas \t", j+1);
// break;
// }
// }
//
bool first = true;
for (int j=0; j<maxColour; j++){
if(bucket[j]){
if (first){
colours[i]=j+1;
first = false;
}
// printf("%d ashhas \t", j+1);
else{
coloursSecond[i]=j+1;
break;
}
}
}
// if (i==0)
// for (int j=0; j<maxColour; j++){
// printf("%d \t",bucket[j]);
// }
// if (i==0){
// printf("\n");
// }
__syncthreads();
//__threadfence();
// printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]);
// Possible issue: There could be a number inbetween the smallest equal guess and the current colour.
//printf("", colours[otheri]); ?????????????
// printf("colours[%d] = %d\n", i, colours[i]);
// printf("coloursOthers[%d] = %d\n", otheri, colours[otheri]);
//colours[otheri]+=0;
if (colours[i]==colours[otheri]){
if (colours[i]<colouring[me-1]){
if (coloursSecond[i] < coloursSecond[otheri]){
if (coloursSecond[i] < colouring[me-1]){
colouring[me-1]=coloursSecond[i];
}
}
else if (coloursSecond[i] == coloursSecond[otheri]) {
if (ipercent2){
colouring[me-1]=colours[i];
}
else{
if (coloursSecond[i] < colouring[me-1]){
colouring[me-1]=coloursSecond[i];
}
}
}
else{
colouring[me-1]=colours[i];
}
}
else{
if (!ipercent2){
colouring[me-1]=colours[i];
}
}
}
// if (colours[i]==colours[otheri]){
//// printf("if\n");
// if (colours[i]<colouring[me-1]){
// if(ipercent2){
// colouring[me-1]=colours[i];
// }
// }
//
// else{
// if (!ipercent2){
// colouring[me-1]=colours[i];
// }
// }
// }
else{
// printf("else\n");
if (colours[i]<colouring[me-1]){
colouring[me-1]=colours[i];
}
else{
if (colours[i]<colours[otheri]){
colouring[me-1]=colours[i];
}
}
}
// __syncthreads();
//
// if (i==0){
// for (int j=0; j<n; j++){
// printf("%d ", vertexArray[j]);
// }
// printf("\n");
//
// for (int j=0; j<m; j++){
// printf("%d ", neighbourArray[j]);
// }
// printf("\n");
// }
// if (i==0){
// for (int j=0; j<n; j++){
// printf("%d ", colouring[j]);
// }
// printf("\n");
// }
//
}
//__global__ void incrementalColouringNewP1 (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours){
//
// int i = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (i >= incrementalCount){
// return;
// }
//
// int startStart, startStop;
// int me, you;
// int otheri;
// bool ipercent2 = false;
//
// me = incrementalArray[i];
//
// if (i%2 == 0){
// you = incrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
// }
// else{
// you = incrementalArray[i-1];
// otheri = i-1;
// }
//
// startStart = vertexArray[me-1];
//
// if (me==n){
// startStop = 2*m;
// }
//
// else{
// startStop = vertexArray[me];
// }
//
// for (int j=startStart; j<startStop; j++){
// if (neighbourArray[j]==0){
// neighbourArray[j]=you;
// break;
// }
// }
//
//}
//__global__ void incrementalColouringNewP2 (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours){
//
// int i = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (i >= incrementalCount){
// return;
// }
//
// int startStart, startStop;
// int me, you;
// int otheri;
// bool ipercent2 = false;
//
// me = incrementalArray[i];
//
// if (i%2 == 0){
// you = incrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
// }
// else{
// you = incrementalArray[i-1];
// otheri = i-1;
// }
//
// colours[i]=0;
//
//
// bool bucket[bucketLimitIncr];
//
// for (int j=0; j<maxColour; j++){
// bucket[j]=true;
// }
//
// for (int j=startStart; j<startStop; j++){
// if (neighbourArray[j]==0){
// continue;
// }
//
// bucket[colouring[neighbourArray[j]-1]-1] = false;
// }
//
// for (int j=0; j<maxColour; j++){
// if(bucket[j]){
// colours[i]=j+1;
// break;
// }
// }
//
//}
//__global__ void incrementalColouringNewP3 (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int *incrementalArray, int incrementalCount, int maxColour, int *colours){
//
// int i = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (i >= incrementalCount){
// return;
// }
//
// int startStart, startStop;
// int me, you;
// int otheri;
// bool ipercent2 = false;
//
// me = incrementalArray[i];
//
// if (i%2 == 0){
// you = incrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
// }
// else{
// you = incrementalArray[i-1];
// otheri = i-1;
// }
//
// if (colouring[me-1]!=colouring[you-1]){
// return;
// }
//
// if (colours[i]==colours[otheri]){
// printf("if\n");
// if (colours[i]<colouring[me-1]){
// if(ipercent2){
// colouring[me-1]=colours[i];
// }
// }
//
// else{
// if (!ipercent2){
// colouring[me-1]=colours[i];
// }
// }
// }
//
// else{
// printf("else\n");
// if (colours[i]<colouring[me-1]){
// colouring[me-1]=colours[i];
// }
//
// else{
// if (colours[i]<colours[otheri]){
// colouring[me-1]=colours[i];
// }
// }
// }
//
//
//}
__global__ void incrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end, int maxColour){
int i = threadIdx.x;
int startStart, startStop;
int me, you;
if (i==0){
me = start;
you = end;
}
else{
me = end;
you = start;
}
startStart = vertexArray[me-1];
if (me==n){
startStop = 2*m;
}
else{
startStop = vertexArray[me];
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=you;
break;
}
}
__syncthreads();
if (colouring[start-1]!=colouring[end-1]){
return;
}
// if (i==0)
// printf("%d and %d Conflict\n", start, end);
__shared__ int colours[2];
colours[i]=0;
// if (i==0)
// printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]);
bool bucket[bucketLimitIncr];
for (int j=0; j<maxColour; j++){
bucket[j]=true;
}
// if (i==0){
// printf("%d %d", startStart, startStop);
//
// for (int j=startStart; j<startStop; j++){
//
// printf("clo %d\n", neighbourArray[j]);
//
// if (neighbourArray[j]!=0){
// printf("clocli %d\n", colouring[neighbourArray[j]-1]);
// }
// }
// }
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
bucket[colouring[neighbourArray[j]-1]-1] = false;
// if (i==0)
// printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]);
}
for (int j=0; j<maxColour; j++){
if(bucket[j]){
colours[i]=j+1;
// printf("%d ashhas \t", j+1);
break;
}
}
// if (i==0)
// for (int j=0; j<maxColour; j++){
// printf("%d \t",bucket[j]);
// }
// if (i==0){
// printf("\n");
// }
__syncthreads();
// printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]);
// Possible issue: There could be a number inbetween the smallest equal guess and the current colour.
if (colours[i]==colours[1-i]){
if (colours[i]<colouring[me-1]){
if(i==0){
colouring[me-1]=colours[i];
}
}
else{
if (i==1){
colouring[me-1]=colours[i];
}
}
}
else{
if (colours[i]<colouring[me-1]){
colouring[me-1]=colours[i];
}
else{
if (colours[i]<colours[1-i]){
colouring[me-1]=colours[i];
}
}
}
__syncthreads();
// if (i==0){
// for (int j=0; j<n; j++){
// printf("%d ", colouring[j]);
// }
// printf("\n");
// }
}
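// One round of independent-set colouring: an uncoloured vertex that is a local maximum of the random numbers among its uncoloured neighbours takes currentColour, a local minimum takes currentColour+1; d_count accumulates how many vertices are coloured so far.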
__global__ void colourMinMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
if (colouring[i]!=0){
return;
}
int myValue = numbers[i];
// printf("I am node %d with value %d\n", i+1, myMax);
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
bool max = true, min = true;
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j];
if (neighbour==0){
continue;
}
neighbour--;
if (max && colouring[neighbour]==0 && numbers[neighbour] >= myValue){
if (numbers[neighbour] == myValue){
if (i < neighbour){
continue;
}
}
max=false;
if (!min){
return;
}
}
if (min && colouring[neighbour]==0 && numbers[neighbour] <= myValue){
if (numbers[neighbour] == myValue){
if (i > neighbour){
continue;
}
}
min=false;
if (!max){
return;
}
}
}
if (max){
colouring[i] = currentColour;
}
else if (min){
colouring[i] = currentColour+1;
}
atomicAdd(&d_count, 1);
}
__global__ void setup_kernel (curandState * state, unsigned long seed ){
int i= blockDim.x * blockIdx.x + threadIdx.x;
curand_init (seed, i, 0, &state[i]);
}
__global__ void randomNumbering (curandState* globalState, int *degreeCount, int n, int limit){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
curandState localState = globalState[i];
float RANDOM = curand_uniform( &localState );
globalState[i] = localState;
RANDOM *= (limit - 1 + 0.999999);
RANDOM += 1;
degreeCount[i] = (int) RANDOM;
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
stop = vertexArray[i+1];
diff = stop-start;
degreeCount[i]=diff;
}
void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
for (int i=0; i<n-1; i++){
for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){
cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
for (int j = vertexArray[n-1]; j < m; ++j)
{
cout<<"e "<<n<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
void addEdge (int *vertexArray, int *neighbourArray, int n, int m, int start, int stop, int &lastStart, int &lastStop){
int startStart, startStop;
int stopStart, stopStop;
startStart = vertexArray[start-1];
stopStart = vertexArray[stop-1];
startStop = vertexArray[start];
stopStop = vertexArray[stop];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=stop;
lastStart = j;
break;
}
}
for (int j=stopStart; j<stopStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=start;
lastStop = j;
break;
}
}
}
void deleteEdge (int *neighbourArray, int lastStart, int lastStop){
neighbourArray[lastStart]=0;
neighbourArray[lastStop]=0;
}
bool isPermissible (int *incrementalArray, int incrementalCount, int *vertexArray, int *neighbourArray, int n, int m, int start, int stop){
int lastStart = 0, lastStop = 0;
addEdge ( vertexArray, neighbourArray, n, m, start, stop, lastStart, lastStop );
for (int i=0; i<incrementalCount; i++){
if (incrementalArray[i] == start || incrementalArray[i] == stop){
deleteEdge(neighbourArray, lastStart, lastStop);
return false;
}
int startStart, startStop;
startStart = vertexArray[incrementalArray[i]-1];
startStop = vertexArray[incrementalArray[i]];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
if (neighbourArray[j]==start || neighbourArray[j]==stop){
deleteEdge(neighbourArray, lastStart, lastStop);
return false;
}
}
}
return true;
}
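// Expected input: n and m, the colour bound h_maxColour, the degree of each of the n vertices, and then the m edges as 1-based "start end" pairs.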
int main(int argc, char const *argv[])
{
int n, m;
cin>>n>>m;
ofstream fout;
fout.open("output4.txt",ios::app);
double rLimit = 1-(30000.0/m);
if (m < 900000){
rLimit = 0.5;
}
int h_maxColour;
int *h_count = new int;
int *h_vertexArray = new int [n+1];
int *h_neighbourArray = new int [2*m];
int *h_degreeCount = new int [n];
int *h_colour = new int [n];
int *h_propagationArray1 = new int [n];
int *h_propagationArray2 = new int [n];
int *d_vertexArray = NULL;
cudaMalloc((void **)&d_vertexArray, (n+1)*sizeof(int));
int *d_neighbourArray = NULL;
cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));
int *d_colour = NULL;
cudaMalloc((void **)&d_colour, (n)*sizeof(int));
cudaMemset((void *)d_colour, 0, (n)*sizeof(int));
int *d_propagationArray1 = NULL;
cudaMalloc((void **)&d_propagationArray1, (1400)*sizeof(int));
cudaMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
int *d_propagationArray2 = NULL;
cudaMalloc((void **)&d_propagationArray2, (n)*sizeof(int));
cudaMemset((void *)d_propagationArray2, 0, (n)*sizeof(int));
int *d_degreeCount = NULL;
cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int));
cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int));
curandState* devStates;
cudaMalloc ( &devStates, n*sizeof( curandState ) );
int offset = 0;
vector<int> startArray, stopArray;
cin>>h_maxColour;
for (int i = 0; i < n; ++i)
{
h_vertexArray[i]=offset;
int degree;
cin>>degree;
offset+=degree;
h_propagationArray1[i]=0;
}
h_vertexArray[n]=2*m;
for (int i = 0; i < 2*m; ++i)
{
h_neighbourArray[i]=0;
}
for (int i = 0; i < m; ++i)
{
int start;
int end;
cin>>start>>end;
double r = ((double) rand() / (RAND_MAX));
int startStart, startStop, stopStart, stopStop;
startStart = h_vertexArray[start-1];
startStop = h_vertexArray[start];
stopStart = h_vertexArray[end-1];
stopStop = h_vertexArray[end];
for (int j=startStart; j<startStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=end;
break;
}
}
for (int j=stopStart; j<stopStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=start;
break;
}
}
if (r>rLimit){
startArray.push_back(start);
stopArray.push_back(end);
}
}
// for (int i=0; i<n+1; i++){
// cout<<h_vertexArray[i]<<" ";
// }
//
// cout<<endl;
//
// for (int i=0; i<2*m; i++){
// cout<<h_neighbourArray[i]<<" ";
// }
//
// cout<<endl;
cudaEvent_t start, stop;
float timeNew;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy(d_vertexArray, h_vertexArray, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
int threadsPerBlock = 512;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
cudaEventRecord(start, 0);
setup_kernel <<<blocksPerGrid, threadsPerBlock>>> ( devStates, time(NULL) );
randomNumbering<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
// cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost);
// cout<<"Random numbers: "<<endl;
//
// for (int i=0; i<n; i++){
// cout<<h_degreeCount[i]<<endl;
// }
int colourCount = 1;
// cout<<"Worklist: "<<endl;
//
// for (int i=0; i<startArray.size(); i++){
// cout<<startArray[i]<<" "<<stopArray[i]<<endl;
// }
cudaEventRecord(start, 0);
while (1){
colourMinMax<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
cudaMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, cudaMemcpyDeviceToHost);
// cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl;
if (*h_count == n){
break;
}
colourCount+=2;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
colourCount++;
thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour);
int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
fout<<maxColour<<"\t";
int maxColourNew;
thrust::device_ptr<int> d_propagationArray_ptr = thrust::device_pointer_cast(d_propagationArray1);
maxColourNew = 0;
colourCountFunc<<< blocksPerGrid, threadsPerBlock >>>(d_colour, n, d_propagationArray1);
maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + 1400);
cudaMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
fout<<maxColourNew<<"\t";
// cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
cout<<"Size: "<<startArray.size()<<endl;
fout<<startArray.size()<<"\t";
int *d_incrementalArray = NULL;
cudaMalloc((void **)&d_incrementalArray, 2*startArray.size()*sizeof(int));
// int *d_colours = NULL;
// cudaMalloc((void **)&d_colours, 1024*sizeof(int));
//
// int *d_coloursSecond = NULL;
// cudaMalloc((void **)&d_coloursSecond, 1024*sizeof(int));
//
int *h_incrementalArray = new int [2*startArray.size()];
//
// vector<bool> marked (startArray.size(), false);
//
// int incrementalCount = 0;
//
//// cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
////
////
//// cout<<"Colour numbers: "<<endl;
////
////
////
//// for (int i=0; i<n; i++){
//// cout<<h_colour[i]<<endl;
//// }
// int printCount = 0;
//
// cudaEventRecord(start, 0);
//
//
// for (int i=0; i<startArray.size(); i++){
// if (marked[i]){
// continue;
// }
//
// int lastStart, lastStop;
//
// incrementalCount = 0;
//
// addEdge(h_vertexArray, h_neighbourArray, n, m, startArray[i], stopArray[i], lastStart, lastStop);
//
// marked[i]=true;
//
// h_incrementalArray[incrementalCount] = startArray[i];
// h_incrementalArray[incrementalCount+1] = stopArray[i];
//
// incrementalCount+=2;
//
// for (int j = i+1; j<startArray.size(); j++){
// if (marked[j]){
// continue;
// }
//
// if (isPermissible (h_incrementalArray, incrementalCount, h_vertexArray, h_neighbourArray, n, m, startArray[j], stopArray[j])){
// marked[j]=true;
// h_incrementalArray[incrementalCount] = startArray[j];
// h_incrementalArray[incrementalCount+1] = stopArray[j];
//
// incrementalCount+=2;
//
// if (incrementalCount == 1024){
// break;
// }
// }
// }
//
//// for (int j=0; j<incrementalCount; j++){
//// cout<<h_incrementalArray[j]<<" ";
//// }
//// cout<<endl;
// int threadsPerBlockIncremental=1024;
// int blocksPerGridIncremental = (incrementalCount + threadsPerBlockIncremental -1)/threadsPerBlockIncremental;
//
// if (blocksPerGridIncremental!=1){
// cout<<"DANGER DANGER DANGER DANGER DANGER DANGER DANGER DANGER"<<endl;
// }
//
// cudaMemcpy(d_incrementalArray, h_incrementalArray, incrementalCount*sizeof(int), cudaMemcpyHostToDevice);
//
//// cout<<incrementalCount<<endl;
// incrementalColouringNew<<<blocksPerGridIncremental, threadsPerBlockIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, 1400, d_colours, d_coloursSecond);
// printCount++;
//// incrementalColouringNewP1<<<threadsPerBlock, blocksPerGridIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, h_maxColour, d_colours);
//// incrementalColouringNewP2<<<threadsPerBlock, blocksPerGridIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, h_maxColour, d_colours);
//// incrementalColouringNewP3<<<threadsPerBlock, blocksPerGridIncremental>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, d_incrementalArray, incrementalCount, h_maxColour, d_colours);
//
//
//// cudaDeviceSynchronize();
//
//// cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
////
////
//// cout<<"Colour numbers: "<<endl;
////
////
////
//// for (int i=0; i<n; i++){
//// cout<<h_colour[i]<<endl;
//// }
////
//
//
// }
//
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
//
// cudaEventElapsedTime(&timeNew, start, stop);
//
// fout<<timeNew<<"\t";
//
// fout<<printCount<<"\t";
//
// cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
// for (int i=0; i<startArray.size(); i++){
//
// int startStart = h_vertexArray[startArray[i]-1];
// int stopStart = h_vertexArray[stopArray[i]-1];
// int startStop = 0;
// int stopStop = 0;
//
// if (startArray[i]==n){
// startStop = 2*m;
// }
//
// else{
// startStop = h_vertexArray[startArray[i]];
// }
//
// if (stopArray[i]==n){
// stopStop = 2*m;
// }
//
// else{
// stopStop = h_vertexArray[stopArray[i]];
// }
//
// for (int j=startStart; j<startStop; j++){
// if (neighbourArray[j]==0){
// neighbourArray[j]=stopArray[i];
// break;
// }
// }
//
// for (int j=stopStart; j<stopStop; j++){
// if (neighbourArray[j]==0){
// neighbourArray[j]=startArray[i];
// break;
// }
// }
//
// //cout<<"New added edge: "<<startArray[i]<<" "<<stopArray[i]<<endl;
// if (incrementalCount == 0){
// h_incrementalArray[incrementalCount] = startArray[i];
// h_incrementalArray[incrementalCount+1] = stopArray[i];
//
// incrementalCount+=2;
// }
//
// else{
// for (int j=0; j<incrementalCount; j++){
//
// startStart = h_vertexArray[h_incrementalArray[j]];
// startStop = 0;
//
// if (h_incrementalArray[j]==n){
// startStop = 2*m;
// }
//
// else{
// startStop = h_vertexArray[h_incrementalArray[j]];
// }
// }
// }
//
//
//
// incrementalColouring<<<1, 2>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, startArray[i], stopArray[i], h_maxColour);
//
// cudaDeviceSynchronize();
//
// }
//
// set<int> tempSet;
// set<int>::iterator it;
for (int i=0; i<startArray.size(); i++){
h_incrementalArray[2*i]=startArray[i];
h_incrementalArray[2*i+1]=stopArray[i];
// h_propagationArray1[startArray[i]-1]=1;
// h_propagationArray1[stopArray[i]-1]=1;
//
// tempSet.insert(startArray[i]);
// tempSet.insert(stopArray[i]);
}
// for (int i=0; i<tempSet.size(); i++){
// h_propagationArray[i] = tempSet[i];
// }
// cout<<"Decremental Array:"<<endl;
//
//
// for (int i=0; i<startArray.size(); i++){
//
// cout<<h_incrementalArray[2*i]<<" "<<h_incrementalArray[2*i+1]<<endl;
//
// }
// cudaMemcpy(h_vertexArray, d_vertexArray, (n+1)*sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), cudaMemcpyDeviceToHost);
//
// for (int i=0; i<(n+1); i++){
// cout<<h_vertexArray[i]<<" ";
// }
//
// cout<<endl;
//
// for (int i=0; i<2*m; i++){
// cout<<h_neighbourArray[i]<<" ";
// }
//
// cout<<endl;
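// Timed decremental update: copy the endpoint pairs of the edges selected for deletion to the device and remove them from the adjacency list in parallel.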
cudaEventRecord(start, 0);
cudaMemcpy(d_incrementalArray, h_incrementalArray, 2*startArray.size()*sizeof(int), cudaMemcpyHostToDevice);
int blocksPerGridDecremental = (2*startArray.size() + threadsPerBlock -1)/threadsPerBlock;
decrementalColouringNew<<<blocksPerGridDecremental, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, n, m, d_incrementalArray, 2*startArray.size());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
// cudaDeviceSynchronize();
// cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
//
// cudaMemcpy(d_propagationArray1, h_propagationArray1, n*sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(d_propagationArray1, h_propagationArray, tempSet.size()*sizeof(int), cudaMemcpyHostToDevice);
// bool flip = true;
//
// int blocksPerGridPropagation = (n + threadsPerBlock -1)/threadsPerBlock;
//
// while (1){
// cudaMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, cudaMemcpyDeviceToHost);
//
// if (*h_count == 0){
// break;
// }
//
// blocksPerGridPropagation = (*h_count + threadsPerBlock -1)/threadsPerBlock;
//
// if (flip){
// flip = false;
// }
//
// else{
// flip = true;
// }
//
//
// }
//
//cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
// cout<<"{ ";
//
// for (int i=0; i<n; i++){
// if (h_propagationArray1[i]!=0){
// cout<<i+1<<" ";
// }
// }
//
// cout<<"}"<<endl;
maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
fout<<maxColour<<"\t";
maxColourNew = 0;
colourCountFunc<<< blocksPerGrid, threadsPerBlock >>>(d_colour, n, d_propagationArray1);
maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + 1400);
cudaMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
fout<<maxColourNew<<"\t";
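// Timed propagation phase: relaunch the re-colouring kernel until every vertex has been finalised (d_countNew reaches n).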
cudaEventRecord(start, 0);
while (1){
propagationColouringNewest<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, d_propagationArray2);
cudaMemcpyFromSymbol(h_count, d_countNew, sizeof(int), 0, cudaMemcpyDeviceToHost);
// cout<<"H Count = "<<*h_count<<endl;
if (*h_count == n){
break;
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\t";
maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
fout<<maxColour<<"\t";
maxColourNew = 0;
colourCountFunc<<< blocksPerGrid, threadsPerBlock >>>(d_colour, n, d_propagationArray1);
maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + 1400);
cudaMemset((void *)d_propagationArray1, 0, (1400)*sizeof(int));
fout<<maxColourNew<<"\t";
// int countPropagation = 0;
// thrust::device_ptr<int> d_propagationArray_ptr = thrust::device_pointer_cast(d_propagationArray1);
// do{
//
// propagationColouring<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, n, m, d_degreeCount, d_colour, d_propagationArray1);
//
// cudaDeviceSynchronize();
//
// countPropagation = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + n);
//
// cout<<countPropagation<<endl;
// cudaMemcpy(h_propagationArray1, d_propagationArray1, n*sizeof(int), cudaMemcpyDeviceToHost);
//
//// cout<<"{ ";
////
//// for (int i=0; i<n; i++){
//// if (h_propagationArray1[i]!=0){
//// cout<<i+1<<" ";
//// }
//// }
////
//// cout<<"}"<<endl;
//
//// cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
//// cout<<"Colour numbers: "<<endl;
////
////
////
//// for (int i=0; i<n; i++){
//// cout<<h_colour[i]<<endl;
//// }
//
// }while (countPropagation);
//
// cout<<"Shamil "<<printCount<<endl;
cudaEventRecord(start, 0);
cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeNew, start, stop);
fout<<timeNew<<"\n";
// cout<<"Colour numbers: "<<endl;
//
//
//
// for (int i=0; i<n; i++){
// cout<<h_colour[i]<<endl;
// }
// cout<<"Time for the kernel: "<<time<<" ms"<<endl;
// cudaMemcpy(h_vertexArray, d_vertexArray, (n+1)*sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), cudaMemcpyDeviceToHost);
// for (int i=0; i<n+1; i++){
// cout<<h_vertexArray[i]<<" ";
// }
//
// cout<<endl;
//
// for (int i=0; i<2*m; i++){
// cout<<h_neighbourArray[i]<<" ";
// }
//
// cout<<endl;
delete h_count;
delete[] h_vertexArray;
delete[] h_neighbourArray;
delete[] h_degreeCount;
delete[] h_colour;
delete[] h_propagationArray1;
delete[] h_propagationArray2;
delete[] h_incrementalArray;
cudaFree(d_neighbourArray);
cudaFree(d_vertexArray);
cudaFree(d_degreeCount);
cudaFree(d_colour);
cudaFree(d_propagationArray1);
cudaFree(d_propagationArray2);
cudaFree(d_incrementalArray);
cudaFree(devStates);
fout.close();
cudaDeviceReset();
return 0;
}
|
2333998d78b9f33588c0ae7bfa020a31eff7add0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
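// Each thread accumulates Gaussian density contributions from all atoms for DUNROLLX consecutive x samples; DUNROLLX and DBLOCKSZX are presumably supplied by includes.h.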
__global__ static void gaussdensity_direct_alt(int natoms, const float4 *xyzr, float gridspacing, unsigned int z, float *densitygrid) {
unsigned int xindex = (blockIdx.x * blockDim.x) * DUNROLLX + threadIdx.x;
unsigned int yindex = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned int zindex = (blockIdx.z * blockDim.z) + threadIdx.z;
unsigned int outaddr =
((gridDim.x * blockDim.x) * DUNROLLX) * (gridDim.y * blockDim.y) * zindex +
((gridDim.x * blockDim.x) * DUNROLLX) * yindex + xindex;
zindex += z;
float coorx = gridspacing * xindex;
float coory = gridspacing * yindex;
float coorz = gridspacing * zindex;
float densityvalx1=0.0f;
float densityvalx2=0.0f;
#if DUNROLLX >= 4
float densityvalx3=0.0f;
float densityvalx4=0.0f;
#endif
#if DUNROLLX >= 8
float densityvalx5=0.0f;
float densityvalx6=0.0f;
float densityvalx7=0.0f;
float densityvalx8=0.0f;
#endif
float gridspacing_coalesce = gridspacing * DBLOCKSZX;
int atomid;
for (atomid=0; atomid<natoms; atomid++) {
float4 atom = xyzr[atomid];
float dy = coory - atom.y;
float dz = coorz - atom.z;
float dyz2 = dy*dy + dz*dz;
float dx1 = coorx - atom.x;
float r21 = (dx1*dx1 + dyz2) * atom.w;
densityvalx1 += exp2f(-r21);
float dx2 = dx1 + gridspacing_coalesce;
float r22 = (dx2*dx2 + dyz2) * atom.w;
densityvalx2 += exp2f(-r22);
#if DUNROLLX >= 4
float dx3 = dx2 + gridspacing_coalesce;
float r23 = (dx3*dx3 + dyz2) * atom.w;
densityvalx3 += exp2f(-r23);
float dx4 = dx3 + gridspacing_coalesce;
float r24 = (dx4*dx4 + dyz2) * atom.w;
densityvalx4 += exp2f(-r24);
#endif
#if DUNROLLX >= 8
float dx5 = dx4 + gridspacing_coalesce;
float r25 = (dx5*dx5 + dyz2) * atom.w;
densityvalx5 += exp2f(-r25);
float dx6 = dx5 + gridspacing_coalesce;
float r26 = (dx6*dx6 + dyz2) * atom.w;
densityvalx6 += exp2f(-r26);
float dx7 = dx6 + gridspacing_coalesce;
float r27 = (dx7*dx7 + dyz2) * atom.w;
densityvalx7 += exp2f(-r27);
float dx8 = dx7 + gridspacing_coalesce;
float r28 = (dx8*dx8 + dyz2) * atom.w;
densityvalx8 += exp2f(-r28);
#endif
}
densitygrid[outaddr ] += densityvalx1;
densitygrid[outaddr+1*DBLOCKSZX] += densityvalx2;
#if DUNROLLX >= 4
densitygrid[outaddr+2*DBLOCKSZX] += densityvalx3;
densitygrid[outaddr+3*DBLOCKSZX] += densityvalx4;
#endif
#if DUNROLLX >= 8
densitygrid[outaddr+4*DBLOCKSZX] += densityvalx5;
densitygrid[outaddr+5*DBLOCKSZX] += densityvalx6;
densitygrid[outaddr+6*DBLOCKSZX] += densityvalx7;
densitygrid[outaddr+7*DBLOCKSZX] += densityvalx8;
#endif
} | 2333998d78b9f33588c0ae7bfa020a31eff7add0.cu | #include "includes.h"
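// Each thread accumulates Gaussian density contributions from all atoms for DUNROLLX consecutive x samples; DUNROLLX and DBLOCKSZX are presumably supplied by includes.h.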
__global__ static void gaussdensity_direct_alt(int natoms, const float4 *xyzr, float gridspacing, unsigned int z, float *densitygrid) {
unsigned int xindex = (blockIdx.x * blockDim.x) * DUNROLLX + threadIdx.x;
unsigned int yindex = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned int zindex = (blockIdx.z * blockDim.z) + threadIdx.z;
unsigned int outaddr =
((gridDim.x * blockDim.x) * DUNROLLX) * (gridDim.y * blockDim.y) * zindex +
((gridDim.x * blockDim.x) * DUNROLLX) * yindex + xindex;
zindex += z;
float coorx = gridspacing * xindex;
float coory = gridspacing * yindex;
float coorz = gridspacing * zindex;
float densityvalx1=0.0f;
float densityvalx2=0.0f;
#if DUNROLLX >= 4
float densityvalx3=0.0f;
float densityvalx4=0.0f;
#endif
#if DUNROLLX >= 8
float densityvalx5=0.0f;
float densityvalx6=0.0f;
float densityvalx7=0.0f;
float densityvalx8=0.0f;
#endif
float gridspacing_coalesce = gridspacing * DBLOCKSZX;
int atomid;
for (atomid=0; atomid<natoms; atomid++) {
float4 atom = xyzr[atomid];
float dy = coory - atom.y;
float dz = coorz - atom.z;
float dyz2 = dy*dy + dz*dz;
float dx1 = coorx - atom.x;
float r21 = (dx1*dx1 + dyz2) * atom.w;
densityvalx1 += exp2f(-r21);
float dx2 = dx1 + gridspacing_coalesce;
float r22 = (dx2*dx2 + dyz2) * atom.w;
densityvalx2 += exp2f(-r22);
#if DUNROLLX >= 4
float dx3 = dx2 + gridspacing_coalesce;
float r23 = (dx3*dx3 + dyz2) * atom.w;
densityvalx3 += exp2f(-r23);
float dx4 = dx3 + gridspacing_coalesce;
float r24 = (dx4*dx4 + dyz2) * atom.w;
densityvalx4 += exp2f(-r24);
#endif
#if DUNROLLX >= 8
float dx5 = dx4 + gridspacing_coalesce;
float r25 = (dx5*dx5 + dyz2) * atom.w;
densityvalx5 += exp2f(-r25);
float dx6 = dx5 + gridspacing_coalesce;
float r26 = (dx6*dx6 + dyz2) * atom.w;
densityvalx6 += exp2f(-r26);
float dx7 = dx6 + gridspacing_coalesce;
float r27 = (dx7*dx7 + dyz2) * atom.w;
densityvalx7 += exp2f(-r27);
float dx8 = dx7 + gridspacing_coalesce;
float r28 = (dx8*dx8 + dyz2) * atom.w;
densityvalx8 += exp2f(-r28);
#endif
}
densitygrid[outaddr ] += densityvalx1;
densitygrid[outaddr+1*DBLOCKSZX] += densityvalx2;
#if DUNROLLX >= 4
densitygrid[outaddr+2*DBLOCKSZX] += densityvalx3;
densitygrid[outaddr+3*DBLOCKSZX] += densityvalx4;
#endif
#if DUNROLLX >= 8
densitygrid[outaddr+4*DBLOCKSZX] += densityvalx5;
densitygrid[outaddr+5*DBLOCKSZX] += densityvalx6;
densitygrid[outaddr+6*DBLOCKSZX] += densityvalx7;
densitygrid[outaddr+7*DBLOCKSZX] += densityvalx8;
#endif
} |
721bfc0e88c36eea14eb2e07e3ac189917a58b48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x,
data_t* y,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = x[i] > static_cast<data_t>(0.) ? x[i] : static_cast<data_t>(0.);
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy,
const data_t* y,
data_t* dx,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > static_cast<data_t>(0.) ? static_cast<data_t>(1.)
: static_cast<data_t>(0.));
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
auto out = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
int numel = x.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_forward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
x.data<data_t>(), out.mutable_data<data_t>(x.place()), numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
int numel = out.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_backward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
| 721bfc0e88c36eea14eb2e07e3ac189917a58b48.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x,
data_t* y,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = x[i] > static_cast<data_t>(0.) ? x[i] : static_cast<data_t>(0.);
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy,
const data_t* y,
data_t* dx,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > static_cast<data_t>(0.) ? static_cast<data_t>(1.)
: static_cast<data_t>(0.));
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
auto out = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
int numel = x.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
relu_cuda_forward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
x.data<data_t>(), out.mutable_data<data_t>(x.place()), numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
int numel = out.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
relu_cuda_backward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
|
e5180bff9766165d4afa71e72974485a70c99323.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <iostream>
__global__ void deviceCublasSgemm(int n, float alpha, float beta,
const float* d_A, const float* d_B,
float* d_C)
{
hipblasHandle_t cnpHandle;
hipblasStatus_t status = hipblasCreate(&cnpHandle);
if (status != HIPBLAS_STATUS_SUCCESS) {
return;
}
// Call function defined in the cublas_device system static library.
// This way we can verify that we properly pass system libraries to the
// device link line
status = hipblasSgemm(cnpHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &alpha,
d_A, n, d_B, n, &beta, d_C, n);
hipblasDestroy(cnpHandle);
}
int choose_cuda_device()
{
int nDevices = 0;
hipError_t err = hipGetDeviceCount(&nDevices);
if (err != hipSuccess) {
std::cerr << "Failed to retrieve the number of CUDA enabled devices"
<< std::endl;
return 1;
}
for (int i = 0; i < nDevices; ++i) {
hipDeviceProp_t prop;
hipError_t err = hipGetDeviceProperties(&prop, i);
if (err != hipSuccess) {
std::cerr << "Could not retrieve properties from CUDA device " << i
<< std::endl;
return 1;
}
if (prop.major > 3 || (prop.major == 3 && prop.minor >= 5)) {
err = hipSetDevice(i);
if (err != hipSuccess) {
std::cout << "Could not select CUDA device " << i << std::endl;
} else {
return 0;
}
}
}
std::cout << "Could not find a CUDA enabled card supporting compute >=3.5"
<< std::endl;
return 1;
}
int main(int argc, char** argv)
{
int ret = choose_cuda_device();
if (ret) {
return 0;
}
// initial values that will make sure that the hipblasSgemm won't actually
// do any work
int n = 0;
float alpha = 1;
float beta = 1;
float* d_A = nullptr;
float* d_B = nullptr;
float* d_C = nullptr;
hipLaunchKernelGGL(( deviceCublasSgemm), dim3(1), dim3(1), 0, 0, n, alpha, beta, d_A, d_B, d_C);
return 0;
}
| e5180bff9766165d4afa71e72974485a70c99323.cu |
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <iostream>
__global__ void deviceCublasSgemm(int n, float alpha, float beta,
const float* d_A, const float* d_B,
float* d_C)
{
cublasHandle_t cnpHandle;
cublasStatus_t status = cublasCreate(&cnpHandle);
if (status != CUBLAS_STATUS_SUCCESS) {
return;
}
// Call function defined in the cublas_device system static library.
// This way we can verify that we properly pass system libraries to the
// device link line
status = cublasSgemm(cnpHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alpha,
d_A, n, d_B, n, &beta, d_C, n);
cublasDestroy(cnpHandle);
}
int choose_cuda_device()
{
int nDevices = 0;
cudaError_t err = cudaGetDeviceCount(&nDevices);
if (err != cudaSuccess) {
std::cerr << "Failed to retrieve the number of CUDA enabled devices"
<< std::endl;
return 1;
}
for (int i = 0; i < nDevices; ++i) {
cudaDeviceProp prop;
cudaError_t err = cudaGetDeviceProperties(&prop, i);
if (err != cudaSuccess) {
std::cerr << "Could not retrieve properties from CUDA device " << i
<< std::endl;
return 1;
}
if (prop.major > 3 || (prop.major == 3 && prop.minor >= 5)) {
err = cudaSetDevice(i);
if (err != cudaSuccess) {
std::cout << "Could not select CUDA device " << i << std::endl;
} else {
return 0;
}
}
}
std::cout << "Could not find a CUDA enabled card supporting compute >=3.5"
<< std::endl;
return 1;
}
int main(int argc, char** argv)
{
int ret = choose_cuda_device();
if (ret) {
return 0;
}
// initial values that will make sure that the cublasSgemm won't actually
// do any work
int n = 0;
float alpha = 1;
float beta = 1;
float* d_A = nullptr;
float* d_B = nullptr;
float* d_C = nullptr;
deviceCublasSgemm<<<1, 1>>>(n, alpha, beta, d_A, d_B, d_C);
return 0;
}
|
4a4c6cf156f9df119a44e8f011d9f13a44eee3f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C" // ensure function name to be exactly "vmultx"
{
}
__global__ void vmultx(const double alpha, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = alpha*a[i] * b[i];
} | 4a4c6cf156f9df119a44e8f011d9f13a44eee3f3.cu | #include "includes.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C" // ensure function name to be exactly "vmultx"
{
}
__global__ void vmultx(const double alpha, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = alpha*a[i] * b[i];
} |
43ea7ca3691462a548ff760d8eefbe8d994a40e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DAWN_GENERATED 1
#undef DAWN_BACKEND_T
#define DAWN_BACKEND_T CUDA
#ifndef BOOST_RESULT_OF_USE_TR1
#define BOOST_RESULT_OF_USE_TR1 1
#endif
#ifndef BOOST_NO_CXX11_DECLTYPE
#define BOOST_NO_CXX11_DECLTYPE 1
#endif
#ifndef GRIDTOOLS_DAWN_HALO_EXTENT
#define GRIDTOOLS_DAWN_HALO_EXTENT 0
#endif
#ifndef BOOST_PP_VARIADICS
#define BOOST_PP_VARIADICS 1
#endif
#ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES
#define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1
#endif
#ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1
#endif
#ifndef GT_VECTOR_LIMIT_SIZE
#define GT_VECTOR_LIMIT_SIZE 30
#endif
#ifndef BOOST_FUSION_INVOKE_MAX_ARITY
#define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_VECTOR_SIZE
#define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_MAP_SIZE
#define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef BOOST_MPL_LIMIT_VECTOR_SIZE
#define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#include <driver-includes/gridtools_includes.hpp>
using namespace gridtools::dawn;
namespace dawn_generated{
namespace cuda{
__global__ void __launch_bounds__(128) generated_stencil59_ms58_kernel(const int isize, const int jsize, const int ksize, const int stride_111_1, const int stride_111_2, ::dawn::float_type * const in, ::dawn::float_type * const out) {
// Start kernel
const unsigned int nx = isize;
const unsigned int ny = jsize;
const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32;
const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4;
// computing the global position in the physical domain
// In a typical cuda block we have the following regions
// aa bbbbbbbb cc
// aa bbbbbbbb cc
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// ee ffffffff gg
// ee ffffffff gg
// Regions b,d,f have warp (or multiple of warp size)
// Size of regions a, c, h, i, e, g are determined by max_extent_t
// Regions b,d,f are easily executed by dedicated warps (one warp for each line)
// Regions (a,h,e) and (c,i,g) are executed by two specialized warp
int iblock = 0 - 1;
int jblock = 0 - 1;
if(threadIdx.y < +4) {
iblock = threadIdx.x;
jblock = (int)threadIdx.y + 0;
}
// initialized iterators
int idx111 = (blockIdx.x*32+iblock)*1+(blockIdx.y*4+jblock)*stride_111_1;
// Pre-fill of kcaches
for(int k = 0+0; k <= 10+0; ++k) {
// Head fill of kcaches
if(iblock >= 0 && iblock <= block_size_i -1 + 0 && jblock >= 0 && jblock <= block_size_j -1 + 0) {
::dawn::float_type dx;
{
out[idx111] = (((int) -4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111+1*1])) + (__ldg(&(in[idx111+1*-1])) + (__ldg(&(in[idx111+stride_111_1*-1])) + __ldg(&(in[idx111+stride_111_1*1]))))))) / (dx * dx));
}
}
// Flush of kcaches
// Flush of kcaches
// Slide kcaches
// increment iterators
idx111+=stride_111_2;
}
// Final flush of kcaches
// Final flush of kcaches
// Final flush of kcaches
// jump iterators to match the beginning of next interval
idx111 += stride_111_2*(4);
// Pre-fill of kcaches
for(int k = 15+0; k <= ksize - 1 + 0+0; ++k) {
// Head fill of kcaches
if(iblock >= 0 && iblock <= block_size_i -1 + 0 && jblock >= 0 && jblock <= block_size_j -1 + 0) {
{
out[idx111] = (int) 10;
}
}
// Flush of kcaches
// Flush of kcaches
// Slide kcaches
// increment iterators
idx111+=stride_111_2;
}
// Final flush of kcaches
// Final flush of kcaches
// Final flush of kcaches
}
class generated {
public:
struct sbase : public timer_cuda {
sbase(std::string name) : timer_cuda(name){}
double get_time() {
return total_time();
}
};
struct stencil_59 : public sbase {
// Members
// Temporary storage typedefs
using tmp_halo_t = gridtools::halo< 0,0, 0, 0, 0>;
using tmp_meta_data_t = storage_traits_t::storage_info_t< 0, 5, tmp_halo_t >;
using tmp_storage_t = storage_traits_t::data_store_t< ::dawn::float_type, tmp_meta_data_t>;
const gridtools::dawn::domain m_dom;
public:
stencil_59(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols) : sbase("stencil_59"), m_dom(dom_){}
static constexpr dawn::driver::cartesian_extent in_extent = {-1,1, -1,1, 0,0};
static constexpr dawn::driver::cartesian_extent out_extent = {0,0, 0,0, 0,0};
void run(storage_ijk_t in_ds, storage_ijk_t out_ds) {
// starting timers
start();
{;
gridtools::data_view<storage_ijk_t> in= gridtools::make_device_view(in_ds);
gridtools::data_view<storage_ijk_t> out= gridtools::make_device_view(out_ds);
const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus();
const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus();
const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus();
dim3 threads(32,4+0,1);
const unsigned int nbx = (nx + 32 - 1) / 32;
const unsigned int nby = (ny + 4 - 1) / 4;
const unsigned int nbz = 1;
dim3 blocks(nbx, nby, nbz);
hipLaunchKernelGGL(( generated_stencil59_ms58_kernel), dim3(blocks), dim3(threads), 0, 0, nx,ny,nz,in_ds.strides()[1],in_ds.strides()[2],(in.data()+in_ds.get_storage_info_ptr()->index(in.begin<0>(), in.begin<1>(),0 )),(out.data()+out_ds.get_storage_info_ptr()->index(out.begin<0>(), out.begin<1>(),0 )));
};
// stopping timers
pause();
}
};
static constexpr const char* s_name = "generated";
stencil_59 m_stencil_59;
public:
generated(const generated&) = delete;
// Members
// Stencil-Data
generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1) : m_stencil_59(dom, rank, xcols, ycols){}
template<typename S>
void sync_storages(S field) {
field.sync();
}
template<typename S0, typename ... S>
void sync_storages(S0 f0, S... fields) {
f0.sync();
sync_storages(fields...);
}
void run(storage_ijk_t in, storage_ijk_t out) {
sync_storages(in,out);
m_stencil_59.run(in,out);
;
sync_storages(in,out);
}
std::string get_name() const {
return std::string(s_name);
}
void reset_meters() {
m_stencil_59.reset(); }
double get_total_time() {
double res = 0;
res +=m_stencil_59.get_time();
return res;
}
};
} // namespace cuda
} // namespace dawn_generated
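// Hypothetical driver sketch (not part of the generated code): given a
// gridtools::dawn::domain `dom` and storage_ijk_t storages `in`/`out`, the
// stencil would be exercised roughly as
//   dawn_generated::cuda::generated stencil(dom);
//   stencil.run(in, out);
//   std::cout << stencil.get_name() << " took " << stencil.get_total_time() << " s\n";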
| 43ea7ca3691462a548ff760d8eefbe8d994a40e5.cu | #define DAWN_GENERATED 1
#undef DAWN_BACKEND_T
#define DAWN_BACKEND_T CUDA
#ifndef BOOST_RESULT_OF_USE_TR1
#define BOOST_RESULT_OF_USE_TR1 1
#endif
#ifndef BOOST_NO_CXX11_DECLTYPE
#define BOOST_NO_CXX11_DECLTYPE 1
#endif
#ifndef GRIDTOOLS_DAWN_HALO_EXTENT
#define GRIDTOOLS_DAWN_HALO_EXTENT 0
#endif
#ifndef BOOST_PP_VARIADICS
#define BOOST_PP_VARIADICS 1
#endif
#ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES
#define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1
#endif
#ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1
#endif
#ifndef GT_VECTOR_LIMIT_SIZE
#define GT_VECTOR_LIMIT_SIZE 30
#endif
#ifndef BOOST_FUSION_INVOKE_MAX_ARITY
#define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_VECTOR_SIZE
#define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_MAP_SIZE
#define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef BOOST_MPL_LIMIT_VECTOR_SIZE
#define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#include <driver-includes/gridtools_includes.hpp>
using namespace gridtools::dawn;
namespace dawn_generated{
namespace cuda{
__global__ void __launch_bounds__(128) generated_stencil59_ms58_kernel(const int isize, const int jsize, const int ksize, const int stride_111_1, const int stride_111_2, ::dawn::float_type * const in, ::dawn::float_type * const out) {
// Start kernel
const unsigned int nx = isize;
const unsigned int ny = jsize;
const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32;
const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4;
// computing the global position in the physical domain
// In a typical cuda block we have the following regions
// aa bbbbbbbb cc
// aa bbbbbbbb cc
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// ee ffffffff gg
// ee ffffffff gg
// Regions b,d,f have warp (or multiple of warp size)
// Size of regions a, c, h, i, e, g are determined by max_extent_t
// Regions b,d,f are easily executed by dedicated warps (one warp for each line)
// Regions (a,h,e) and (c,i,g) are executed by two specialized warp
int iblock = 0 - 1;
int jblock = 0 - 1;
if(threadIdx.y < +4) {
iblock = threadIdx.x;
jblock = (int)threadIdx.y + 0;
}
// initialized iterators
int idx111 = (blockIdx.x*32+iblock)*1+(blockIdx.y*4+jblock)*stride_111_1;
// Pre-fill of kcaches
for(int k = 0+0; k <= 10+0; ++k) {
// Head fill of kcaches
if(iblock >= 0 && iblock <= block_size_i -1 + 0 && jblock >= 0 && jblock <= block_size_j -1 + 0) {
::dawn::float_type dx;
{
out[idx111] = (((int) -4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111+1*1])) + (__ldg(&(in[idx111+1*-1])) + (__ldg(&(in[idx111+stride_111_1*-1])) + __ldg(&(in[idx111+stride_111_1*1]))))))) / (dx * dx));
}
}
// Flush of kcaches
// Flush of kcaches
// Slide kcaches
// increment iterators
idx111+=stride_111_2;
}
// Final flush of kcaches
// Final flush of kcaches
// Final flush of kcaches
// jump iterators to match the beginning of next interval
idx111 += stride_111_2*(4);
// Pre-fill of kcaches
for(int k = 15+0; k <= ksize - 1 + 0+0; ++k) {
// Head fill of kcaches
if(iblock >= 0 && iblock <= block_size_i -1 + 0 && jblock >= 0 && jblock <= block_size_j -1 + 0) {
{
out[idx111] = (int) 10;
}
}
// Flush of kcaches
// Flush of kcaches
// Slide kcaches
// increment iterators
idx111+=stride_111_2;
}
// Final flush of kcaches
// Final flush of kcaches
// Final flush of kcaches
}
class generated {
public:
struct sbase : public timer_cuda {
sbase(std::string name) : timer_cuda(name){}
double get_time() {
return total_time();
}
};
struct stencil_59 : public sbase {
// Members
// Temporary storage typedefs
using tmp_halo_t = gridtools::halo< 0,0, 0, 0, 0>;
using tmp_meta_data_t = storage_traits_t::storage_info_t< 0, 5, tmp_halo_t >;
using tmp_storage_t = storage_traits_t::data_store_t< ::dawn::float_type, tmp_meta_data_t>;
const gridtools::dawn::domain m_dom;
public:
stencil_59(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols) : sbase("stencil_59"), m_dom(dom_){}
static constexpr dawn::driver::cartesian_extent in_extent = {-1,1, -1,1, 0,0};
static constexpr dawn::driver::cartesian_extent out_extent = {0,0, 0,0, 0,0};
void run(storage_ijk_t in_ds, storage_ijk_t out_ds) {
// starting timers
start();
{;
gridtools::data_view<storage_ijk_t> in= gridtools::make_device_view(in_ds);
gridtools::data_view<storage_ijk_t> out= gridtools::make_device_view(out_ds);
const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus();
const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus();
const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus();
dim3 threads(32,4+0,1);
const unsigned int nbx = (nx + 32 - 1) / 32;
const unsigned int nby = (ny + 4 - 1) / 4;
const unsigned int nbz = 1;
dim3 blocks(nbx, nby, nbz);
generated_stencil59_ms58_kernel<<<blocks, threads>>>(nx,ny,nz,in_ds.strides()[1],in_ds.strides()[2],(in.data()+in_ds.get_storage_info_ptr()->index(in.begin<0>(), in.begin<1>(),0 )),(out.data()+out_ds.get_storage_info_ptr()->index(out.begin<0>(), out.begin<1>(),0 )));
};
// stopping timers
pause();
}
};
static constexpr const char* s_name = "generated";
stencil_59 m_stencil_59;
public:
generated(const generated&) = delete;
// Members
// Stencil-Data
generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1) : m_stencil_59(dom, rank, xcols, ycols){}
template<typename S>
void sync_storages(S field) {
field.sync();
}
template<typename S0, typename ... S>
void sync_storages(S0 f0, S... fields) {
f0.sync();
sync_storages(fields...);
}
void run(storage_ijk_t in, storage_ijk_t out) {
sync_storages(in,out);
m_stencil_59.run(in,out);
;
sync_storages(in,out);
}
std::string get_name() const {
return std::string(s_name);
}
void reset_meters() {
m_stencil_59.reset(); }
double get_total_time() {
double res = 0;
res +=m_stencil_59.get_time();
return res;
}
};
} // namespace cuda
} // namespace dawn_generated
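// Hypothetical driver sketch (not part of the generated code): given a
// gridtools::dawn::domain `dom` and storage_ijk_t storages `in`/`out`, the
// stencil would be exercised roughly as
//   dawn_generated::cuda::generated stencil(dom);
//   stencil.run(in, out);
//   std::cout << stencil.get_name() << " took " << stencil.get_total_time() << " s\n";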
|
af5b6ac35b8f06cc8ea63cfdea5e3c635cfbc2d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "imagesHandler.h"
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <vector>
#include <stdexcept>
#include <string>
#include <cmath>
#include <ctime>
#define BLOCKSIZE 1024
void checkCUDAError(const char *msg){
hipError_t err = hipGetLastError();
if( hipSuccess != err){
fprintf(stderr, "CUDA Error: %s: %s.\n", msg, hipGetErrorString(err) );
exit(EXIT_FAILURE);
}
}
struct Data {
explicit Data(int size) : size(size), bytes(size * sizeof(float)){
hipMalloc(&x, bytes);
hipMalloc(&y, bytes);
hipMalloc(&z, bytes);
hipMalloc(&assignments, bytes);
}
Data(int size, std::vector<float>& h_x, std::vector<float>& h_y,std::vector<float>& h_z,std::vector<float>& h_assignments): size(size),bytes(size*sizeof(float)){
hipMalloc(&x, bytes);
hipMalloc(&y, bytes);
hipMalloc(&z, bytes);
hipMalloc(&assignments, bytes);
hipMemcpy(x, h_x.data(), h_x.size()* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y, h_y.data(), h_x.size()* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(z, h_z.data(), h_x.size()* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(assignments, h_assignments.data(), h_x.size()* sizeof(float), hipMemcpyHostToDevice);
}
~Data() {
hipFree(x);
hipFree(y);
hipFree(z);
hipFree(assignments);
}
void clear() {
hipMemset(x, 0, bytes);
hipMemset(y, 0, bytes);
hipMemset(z, 0, bytes);
hipMemset(assignments, 0, bytes);
}
float* x{nullptr};
float* y{nullptr};
float* z{nullptr};
float* assignments{nullptr};
int size{0};
int bytes{0};
};
//function to compute the squared L2 distance (no sqrt is needed for nearest-centroid comparison); it can be quickly extended to more dimensions by adding parameters
__device__ float squared_l2_distance(float x_1, float y_1, float z_1, float x_2, float y_2, float z_2) {
return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2) + (z_1 - z_2) * (z_1 - z_2);
}
//function to compute the distances AND write the best cluster id into data_assignments
__global__ void assign_clusters(const float* __restrict__ data_x,
const float* __restrict__ data_y,
const float* __restrict__ data_z,
float* data_assignments,
const int data_size,
const float* __restrict__ means_x,
const float* __restrict__ means_y,
const float* __restrict__ means_z,
const float* __restrict__ means_assignments,
const int numberOfClusters) {
__shared__ float shared_means[300*3];
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
//first k threads copy over the cluster means.
if (threadIdx.x < numberOfClusters) {
shared_means[threadIdx.x] = means_x[threadIdx.x];
shared_means[numberOfClusters + threadIdx.x] = means_y[threadIdx.x];
shared_means[numberOfClusters*2 + threadIdx.x] = means_z[threadIdx.x];
}
// Wait for those k threads.
__syncthreads();
const float x = data_x[index];
const float y = data_y[index];
const float z = data_z[index];
float best_distance = squared_l2_distance(x, y, z,shared_means[0],shared_means[numberOfClusters],shared_means[numberOfClusters*2]);
int best_cluster = 0;
for (int cluster = 1; cluster < numberOfClusters; cluster++) {
float distance =squared_l2_distance(x, y, z, shared_means[cluster],shared_means[numberOfClusters + cluster],shared_means[numberOfClusters*2 + cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
data_assignments[index]=best_cluster;
}
//populate the four big per-cluster arrays (x, y, z, counts) used by the reductions
__global__ void populate(const float* __restrict__ data_x,
const float* __restrict__ data_y,
const float* __restrict__ data_z,
const float* __restrict__ data_assignments,
const int data_size,
float* means_x,
float* means_y,
float* means_z,
float* means_assignments,
const int numberOfClusters) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
for(int cluster = 0; cluster < numberOfClusters; cluster++){
if(cluster == data_assignments[index]){
means_x[index + data_size * cluster] = data_x[index];
means_y[index + data_size * cluster] = data_y[index];
means_z[index + data_size * cluster] = data_z[index];
means_assignments[index + data_size * cluster] = 1;
}else{
means_x[index + data_size * cluster] = 0;
means_y[index + data_size * cluster] = 0;
means_z[index + data_size * cluster] = 0;
means_assignments[index + data_size * cluster] = 0;
}
}
}
//REDUCTION
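// Note: warpReduce relies on the classic volatile-based, warp-synchronous
// unrolling (no __syncwarp between steps); this assumes the threads of a warp
// run in lockstep, which newer architectures with independent thread
// scheduling no longer guarantee.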
template <size_t blockSize>
__device__ void warpReduce(volatile float *sdata, size_t tid){
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
template<size_t blockSize>
__global__ void reductionModified(const float* __restrict__ data_x,
const float* __restrict__ data_y,
const float* __restrict__ data_z,
const float* __restrict__ data_assignments,
float* dataOutput_x,
float* dataOutput_y,
float* dataOutput_z,
float* dataOutput_assignments,
size_t data_size,
int numberOfClusters) {
__shared__ float sdata [blockSize*4] ;
for(int cluster = 0; cluster<numberOfClusters; cluster++){
size_t tid = threadIdx.x;
size_t i = blockIdx.x*(blockSize * 2) + tid;
size_t gridSize = blockSize * 2 *gridDim.x;
int x = 0;
int y = blockSize;
int z = blockSize * 2;
int ce = blockSize * 3;
sdata[tid + x] = 0;
sdata[tid + y] = 0;
sdata[tid + z] = 0;
sdata[tid + ce] = 0;
while(i < data_size){
sdata[tid + x] += data_x[i + data_size * cluster ] + data_x[i + data_size * cluster +blockSize];
sdata[tid + y] += data_y[i + data_size * cluster ]+ data_y[i + data_size * cluster +blockSize];
sdata[tid + z] += data_z[i + data_size * cluster ]+ data_z[i + data_size * cluster +blockSize];
sdata[tid + ce] += data_assignments[i + data_size * cluster ] + data_assignments[i + data_size * cluster +blockSize];
i += gridSize;
} __syncthreads();
if (blockSize >= 1024) { if (tid < 512) {
sdata[tid + x] += sdata[tid + x + 512];
sdata[tid + y] += sdata[tid + y + 512];
sdata[tid + z] += sdata[tid + z + 512];
sdata[tid + ce] += sdata[tid + ce + 512];
} __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) {
sdata[tid + x] += sdata[tid + x + 256];
sdata[tid + y] += sdata[tid + y + 256];
sdata[tid + z] += sdata[tid + z + 256];
sdata[tid + ce] += sdata[tid + ce + 256];
} __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) {
sdata[tid + x] += sdata[tid + x + 128];
sdata[tid + y] += sdata[tid + y + 128];
sdata[tid + z] += sdata[tid + z + 128];
sdata[tid + ce] += sdata[tid + ce + 128];
} __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) {
sdata[tid + x] += sdata[tid + x + 64];
sdata[tid + y] += sdata[tid + y + 64];
sdata[tid + z] += sdata[tid + z + 64];
sdata[tid + ce] += sdata[tid + ce + 64];
} __syncthreads(); }
if (tid < 32){
warpReduce<blockSize>(sdata, tid + x);
warpReduce<blockSize>(sdata, tid + y);
warpReduce<blockSize>(sdata, tid + z);
warpReduce<blockSize>(sdata, tid + ce);
} __syncthreads();
if (tid == 0){
dataOutput_x[blockIdx.x + gridDim.x * cluster] = sdata[x];
dataOutput_y[blockIdx.x + gridDim.x * cluster] = sdata[y];
dataOutput_z[blockIdx.x + gridDim.x * cluster] = sdata[z];
dataOutput_assignments[blockIdx.x + gridDim.x * cluster] = sdata[ce];
}
}
}
//function to compute the new means from the reduction results
__global__ void divideStep(float* dmx,
float* dmy,
float* dmz,
const float* __restrict__ tmpx,
const float* __restrict__ tmpy,
const float* __restrict__ tmpz,
const float* __restrict__ tmpa,
int numberOfClusters) {
if(threadIdx.x >= numberOfClusters)return;
int count = max(1,(int)tmpa[threadIdx.x]);
dmx[threadIdx.x] = tmpx[threadIdx.x]/count;
dmy[threadIdx.x] = tmpy[threadIdx.x]/count;
dmz[threadIdx.x] = tmpz[threadIdx.x]/count;
}
int main(int argc, char **argi){
//Image Handler creation
imagesHandler handler;
//Input params acquisition && Image opening by CImg and dimension acquisition
std::vector<int> params = handler.inputParamAcquisition(argi);
int iterations = params[0];
int numberOfClusters = params[1];
int columns = params[2];
int rows = params[3];
//Data array initialization
std::vector<float> h_x(rows * columns);
std::vector<float> h_y(rows * columns);
std::vector<float> h_z(rows * columns);
std::vector<float> h_assignments(rows * columns);
for(int i=0;i<rows*columns;i++){
h_assignments[i]=0;
}
//Data array population
handler.dataAcquisition(h_x, h_y, h_z);
int number_of_elements = h_x.size();
Data d_data(number_of_elements, h_x, h_y, h_z,h_assignments);checkCUDAError("Error during d_data init");
//Random first cluster means selection
std::random_device seed;
std::mt19937 rng(seed());
std::shuffle(h_x.begin(), h_x.end(), rng);
std::shuffle(h_y.begin(), h_y.end(), rng);
std::shuffle(h_z.begin(), h_z.end(), rng);
Data d_means(numberOfClusters * number_of_elements, h_x, h_y, h_z, h_assignments);checkCUDAError("Error during d_means init");
//GPU initialization
size_t blocksPerGridFixed = ::ceil((1.*number_of_elements) / BLOCKSIZE);
float* tmpx;
hipMalloc(&tmpx, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
float* tmpy;
hipMalloc(&tmpy, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
float* tmpz;
hipMalloc(&tmpz, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
float* tmpass;
hipMalloc(&tmpass, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
std::cout<< "\n\n image processing...\n\n";
//clock initialization
std::clock_t start;
double duration;
start = std::clock();
//KMEANS
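// Each iteration: assign_clusters labels every pixel with its nearest mean,
// populate scatters per-cluster partial values (x, y, z, count) into the big
// d_means buffers, reductionModified collapses them block by block, and
// divideStep divides the totals by the counts to obtain the new means.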
for (int iteration = 0; iteration < iterations; iteration++) {
hipLaunchKernelGGL(( assign_clusters), dim3(blocksPerGridFixed), dim3(BLOCKSIZE), 0, 0, d_data.x,
d_data.y,
d_data.z,
d_data.assignments,
number_of_elements,
d_means.x,
d_means.y,
d_means.z,
d_means.assignments,
numberOfClusters
);checkCUDAError("Error during assign cluster ");
hipDeviceSynchronize();
hipLaunchKernelGGL(( populate), dim3(blocksPerGridFixed), dim3(BLOCKSIZE), 0, 0, d_data.x,
d_data.y,
d_data.z,
d_data.assignments,
number_of_elements,
d_means.x,
d_means.y,
d_means.z,
d_means.assignments,
numberOfClusters
);checkCUDAError("Error during population");
hipDeviceSynchronize();
//reduction
size_t n = number_of_elements;
do{
size_t blocksPerGrid = ::ceil((1.*n) / BLOCKSIZE);
hipLaunchKernelGGL(( reductionModified<BLOCKSIZE>), dim3(blocksPerGrid),dim3(BLOCKSIZE), 0, 0, d_means.x,
d_means.y,
d_means.z,
d_means.assignments,
tmpx,
tmpy,
tmpz,
tmpass,
n,
numberOfClusters);checkCUDAError("Error during reduction");
hipDeviceSynchronize();checkCUDAError("Error on do-while loop [GPUReduction]");
hipMemcpy(d_means.x, tmpx, sizeof(float) * blocksPerGrid * numberOfClusters, hipMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpx");
hipMemcpy(d_means.y, tmpy, sizeof(float) * blocksPerGrid * numberOfClusters,hipMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpy");
hipMemcpy(d_means.z, tmpz, sizeof(float) * blocksPerGrid * numberOfClusters,hipMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpz");
hipMemcpy(d_means.assignments, tmpass, sizeof(float) * blocksPerGrid * numberOfClusters,hipMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpass");
n = blocksPerGrid;
} while (n > BLOCKSIZE);
if (n > 1){
hipLaunchKernelGGL(( reductionModified<BLOCKSIZE>), dim3(1),dim3(BLOCKSIZE), 0, 0, tmpx,
tmpy,
tmpz,
tmpass,
tmpx,
tmpy,
tmpz,
tmpass,
n,
numberOfClusters);checkCUDAError("Error during last step reduction");
hipDeviceSynchronize();checkCUDAError("Error on mid main loop [GPUReduction]");
hipLaunchKernelGGL(( divideStep), dim3(1),dim3(BLOCKSIZE), 0, 0, d_means.x,
d_means.y,
d_means.z,
tmpx,
tmpy,
tmpz,
tmpass,
numberOfClusters);checkCUDAError("Error during divideStep");
}
hipDeviceSynchronize();checkCUDAError("Error on bottom main loop [GPUReduction]");
}
hipFree(tmpx);
hipFree(tmpy);
hipFree(tmpz);
hipFree(tmpass);
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
std::cout<< "PROCESSING TIME: "<< duration << " s" <<'\n';
//Processed data acquisition to color the output image
float* h_best;
h_best = (float*)malloc(h_x.size()*sizeof(float));
hipMemcpy(h_best,d_data.assignments, h_x.size()*sizeof(float), hipMemcpyDeviceToHost);
float* finalmeanx;
float* finalmeany;
float* finalmeanz;
finalmeanx = (float*)malloc(numberOfClusters*sizeof(float));
finalmeany = (float*)malloc(numberOfClusters*sizeof(float));
finalmeanz = (float*)malloc(numberOfClusters*sizeof(float));
hipMemcpy(finalmeanx, d_means.x, numberOfClusters*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(finalmeany, d_means.y, numberOfClusters*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(finalmeanz, d_means.z, numberOfClusters*sizeof(float),hipMemcpyDeviceToHost);
std::vector<int> clustColorR(numberOfClusters);
std::vector<int> clustColorG(numberOfClusters);
std::vector<int> clustColorB(numberOfClusters);
for (int cluster = 0; cluster < numberOfClusters; cluster++){
clustColorR[cluster]=(int)finalmeanx[cluster];
clustColorG[cluster]=(int)finalmeany[cluster];
clustColorB[cluster]=(int)finalmeanz[cluster];
}
int* assignedPixels;
assignedPixels = (int*)malloc(number_of_elements*sizeof(int));
for(int i=0; i<number_of_elements; i++){
assignedPixels[i]=(int)h_best[i];
}
handler.disp(assignedPixels, clustColorR, clustColorG, clustColorB);
}
| af5b6ac35b8f06cc8ea63cfdea5e3c635cfbc2d0.cu | #include "imagesHandler.h"
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <vector>
#include <stdexcept>
#include <string>
#include <cmath>
#include <ctime>
#define BLOCKSIZE 1024
void checkCUDAError(const char *msg){
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err){
fprintf(stderr, "CUDA Error: %s: %s.\n", msg, cudaGetErrorString(err) );
exit(EXIT_FAILURE);
}
}
struct Data {
explicit Data(int size) : size(size), bytes(size * sizeof(float)){
cudaMalloc(&x, bytes);
cudaMalloc(&y, bytes);
cudaMalloc(&z, bytes);
cudaMalloc(&assignments, bytes);
}
Data(int size, std::vector<float>& h_x, std::vector<float>& h_y,std::vector<float>& h_z,std::vector<float>& h_assignments): size(size),bytes(size*sizeof(float)){
cudaMalloc(&x, bytes);
cudaMalloc(&y, bytes);
cudaMalloc(&z, bytes);
cudaMalloc(&assignments, bytes);
cudaMemcpy(x, h_x.data(), h_x.size()* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y, h_y.data(), h_x.size()* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(z, h_z.data(), h_x.size()* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(assignments, h_assignments.data(), h_x.size()* sizeof(float), cudaMemcpyHostToDevice);
}
~Data() {
cudaFree(x);
cudaFree(y);
cudaFree(z);
cudaFree(assignments);
}
void clear() {
cudaMemset(x, 0, bytes);
cudaMemset(y, 0, bytes);
cudaMemset(z, 0, bytes);
cudaMemset(assignments, 0, bytes);
}
float* x{nullptr};
float* y{nullptr};
float* z{nullptr};
float* assignments{nullptr};
int size{0};
int bytes{0};
};
//function to compute the squared L2 distance (no sqrt is needed for nearest-centroid comparison); it can be quickly extended to more dimensions by adding parameters
__device__ float squared_l2_distance(float x_1, float y_1, float z_1, float x_2, float y_2, float z_2) {
return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2) + (z_1 - z_2) * (z_1 - z_2);
}
//function to compute the distances AND write the best cluster id into data_assignments
__global__ void assign_clusters(const float* __restrict__ data_x,
const float* __restrict__ data_y,
const float* __restrict__ data_z,
float* data_assignments,
const int data_size,
const float* __restrict__ means_x,
const float* __restrict__ means_y,
const float* __restrict__ means_z,
const float* __restrict__ means_assignments,
const int numberOfClusters) {
__shared__ float shared_means[300*3];
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
//first k threads copy over the cluster means.
if (threadIdx.x < numberOfClusters) {
shared_means[threadIdx.x] = means_x[threadIdx.x];
shared_means[numberOfClusters + threadIdx.x] = means_y[threadIdx.x];
shared_means[numberOfClusters*2 + threadIdx.x] = means_z[threadIdx.x];
}
// Wait for those k threads.
__syncthreads();
const float x = data_x[index];
const float y = data_y[index];
const float z = data_z[index];
float best_distance = squared_l2_distance(x, y, z,shared_means[0],shared_means[numberOfClusters],shared_means[numberOfClusters*2]);
int best_cluster = 0;
for (int cluster = 1; cluster < numberOfClusters; cluster++) {
float distance =squared_l2_distance(x, y, z, shared_means[cluster],shared_means[numberOfClusters + cluster],shared_means[numberOfClusters*2 + cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
data_assignments[index]=best_cluster;
}
//populate the four big per-cluster arrays (x, y, z, counts) used by the reductions
__global__ void populate(const float* __restrict__ data_x,
const float* __restrict__ data_y,
const float* __restrict__ data_z,
const float* __restrict__ data_assignments,
const int data_size,
float* means_x,
float* means_y,
float* means_z,
float* means_assignments,
const int numberOfClusters) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
for(int cluster = 0; cluster < numberOfClusters; cluster++){
if(cluster == data_assignments[index]){
means_x[index + data_size * cluster] = data_x[index];
means_y[index + data_size * cluster] = data_y[index];
means_z[index + data_size * cluster] = data_z[index];
means_assignments[index + data_size * cluster] = 1;
}else{
means_x[index + data_size * cluster] = 0;
means_y[index + data_size * cluster] = 0;
means_z[index + data_size * cluster] = 0;
means_assignments[index + data_size * cluster] = 0;
}
}
}
//REDUCTION
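// Note: warpReduce relies on the classic volatile-based, warp-synchronous
// unrolling (no __syncwarp between steps); this assumes the threads of a warp
// run in lockstep, which newer architectures with independent thread
// scheduling no longer guarantee.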
template <size_t blockSize>
__device__ void warpReduce(volatile float *sdata, size_t tid){
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
template<size_t blockSize>
__global__ void reductionModified(const float* __restrict__ data_x,
const float* __restrict__ data_y,
const float* __restrict__ data_z,
const float* __restrict__ data_assignments,
float* dataOutput_x,
float* dataOutput_y,
float* dataOutput_z,
float* dataOutput_assignments,
size_t data_size,
int numberOfClusters) {
__shared__ float sdata [blockSize*4] ;
for(int cluster = 0; cluster<numberOfClusters; cluster++){
size_t tid = threadIdx.x;
size_t i = blockIdx.x*(blockSize * 2) + tid;
size_t gridSize = blockSize * 2 *gridDim.x;
int x = 0;
int y = blockSize;
int z = blockSize * 2;
int ce = blockSize * 3;
sdata[tid + x] = 0;
sdata[tid + y] = 0;
sdata[tid + z] = 0;
sdata[tid + ce] = 0;
while(i < data_size){
sdata[tid + x] += data_x[i + data_size * cluster ] + data_x[i + data_size * cluster +blockSize];
sdata[tid + y] += data_y[i + data_size * cluster ]+ data_y[i + data_size * cluster +blockSize];
sdata[tid + z] += data_z[i + data_size * cluster ]+ data_z[i + data_size * cluster +blockSize];
sdata[tid + ce] += data_assignments[i + data_size * cluster ] + data_assignments[i + data_size * cluster +blockSize];
i += gridSize;
} __syncthreads();
if (blockSize >= 1024) { if (tid < 512) {
sdata[tid + x] += sdata[tid + x + 512];
sdata[tid + y] += sdata[tid + y + 512];
sdata[tid + z] += sdata[tid + z + 512];
sdata[tid + ce] += sdata[tid + ce + 512];
} __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) {
sdata[tid + x] += sdata[tid + x + 256];
sdata[tid + y] += sdata[tid + y + 256];
sdata[tid + z] += sdata[tid + z + 256];
sdata[tid + ce] += sdata[tid + ce + 256];
} __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) {
sdata[tid + x] += sdata[tid + x + 128];
sdata[tid + y] += sdata[tid + y + 128];
sdata[tid + z] += sdata[tid + z + 128];
sdata[tid + ce] += sdata[tid + ce + 128];
} __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) {
sdata[tid + x] += sdata[tid + x + 64];
sdata[tid + y] += sdata[tid + y + 64];
sdata[tid + z] += sdata[tid + z + 64];
sdata[tid + ce] += sdata[tid + ce + 64];
} __syncthreads(); }
if (tid < 32){
warpReduce<blockSize>(sdata, tid + x);
warpReduce<blockSize>(sdata, tid + y);
warpReduce<blockSize>(sdata, tid + z);
warpReduce<blockSize>(sdata, tid + ce);
} __syncthreads();
if (tid == 0){
dataOutput_x[blockIdx.x + gridDim.x * cluster] = sdata[x];
dataOutput_y[blockIdx.x + gridDim.x * cluster] = sdata[y];
dataOutput_z[blockIdx.x + gridDim.x * cluster] = sdata[z];
dataOutput_assignments[blockIdx.x + gridDim.x * cluster] = sdata[ce];
}
}
}
//function to compute the new means from the reduction results
__global__ void divideStep(float* dmx,
float* dmy,
float* dmz,
const float* __restrict__ tmpx,
const float* __restrict__ tmpy,
const float* __restrict__ tmpz,
const float* __restrict__ tmpa,
int numberOfClusters) {
if(threadIdx.x >= numberOfClusters)return;
int count = max(1,(int)tmpa[threadIdx.x]);
dmx[threadIdx.x] = tmpx[threadIdx.x]/count;
dmy[threadIdx.x] = tmpy[threadIdx.x]/count;
dmz[threadIdx.x] = tmpz[threadIdx.x]/count;
}
int main(int argc, char **argi){
//Image Handler creation
imagesHandler handler;
//Input params acquisition && Image opening by CImg and dimension acquisition
std::vector<int> params = handler.inputParamAcquisition(argi);
int iterations = params[0];
int numberOfClusters = params[1];
int columns = params[2];
int rows = params[3];
//Data array initialization
std::vector<float> h_x(rows * columns);
std::vector<float> h_y(rows * columns);
std::vector<float> h_z(rows * columns);
std::vector<float> h_assignments(rows * columns);
for(int i=0;i<rows*columns;i++){
h_assignments[i]=0;
}
//Data array population
handler.dataAcquisition(h_x, h_y, h_z);
int number_of_elements = h_x.size();
Data d_data(number_of_elements, h_x, h_y, h_z,h_assignments);checkCUDAError("Error during d_data init");
//Random first cluster means selection
std::random_device seed;
std::mt19937 rng(seed());
std::shuffle(h_x.begin(), h_x.end(), rng);
std::shuffle(h_y.begin(), h_y.end(), rng);
std::shuffle(h_z.begin(), h_z.end(), rng);
Data d_means(numberOfClusters * number_of_elements, h_x, h_y, h_z, h_assignments);checkCUDAError("Error during d_means init");
//GPU initialization
size_t blocksPerGridFixed = std::ceil((1.*number_of_elements) / BLOCKSIZE);
float* tmpx;
cudaMalloc(&tmpx, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
float* tmpy;
cudaMalloc(&tmpy, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
float* tmpz;
cudaMalloc(&tmpz, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
float* tmpass;
cudaMalloc(&tmpass, sizeof(float) * blocksPerGridFixed * numberOfClusters); checkCUDAError("Error allocating tmp [GPUReduction]");
std::cout<< "\n\n image processing...\n\n";
//clock initialization
std::clock_t start;
double duration;
start = std::clock();
//KMEANS
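// Each iteration: assign_clusters labels every pixel with its nearest mean,
// populate scatters per-cluster partial values (x, y, z, count) into the big
// d_means buffers, reductionModified collapses them block by block, and
// divideStep divides the totals by the counts to obtain the new means.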
for (int iteration = 0; iteration < iterations; iteration++) {
assign_clusters<<<blocksPerGridFixed, BLOCKSIZE>>>(d_data.x,
d_data.y,
d_data.z,
d_data.assignments,
number_of_elements,
d_means.x,
d_means.y,
d_means.z,
d_means.assignments,
numberOfClusters
);checkCUDAError("Error during assign cluster ");
cudaDeviceSynchronize();
populate<<<blocksPerGridFixed, BLOCKSIZE>>>(d_data.x,
d_data.y,
d_data.z,
d_data.assignments,
number_of_elements,
d_means.x,
d_means.y,
d_means.z,
d_means.assignments,
numberOfClusters
);checkCUDAError("Error during population");
cudaDeviceSynchronize();
//reduction
size_t n = number_of_elements;
do{
size_t blocksPerGrid = std::ceil((1.*n) / BLOCKSIZE);
reductionModified<BLOCKSIZE><<<blocksPerGrid,BLOCKSIZE>>>(d_means.x,
d_means.y,
d_means.z,
d_means.assignments,
tmpx,
tmpy,
tmpz,
tmpass,
n,
numberOfClusters);checkCUDAError("Error during reduction");
cudaDeviceSynchronize();checkCUDAError("Error on do-while loop [GPUReduction]");
cudaMemcpy(d_means.x, tmpx, sizeof(float) * blocksPerGrid * numberOfClusters, cudaMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpx");
cudaMemcpy(d_means.y, tmpy, sizeof(float) * blocksPerGrid * numberOfClusters,cudaMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpy");
cudaMemcpy(d_means.z, tmpz, sizeof(float) * blocksPerGrid * numberOfClusters,cudaMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpz");
cudaMemcpy(d_means.assignments, tmpass, sizeof(float) * blocksPerGrid * numberOfClusters,cudaMemcpyDeviceToDevice);checkCUDAError("Error copying into tmpass");
n = blocksPerGrid;
} while (n > BLOCKSIZE);
if (n > 1){
reductionModified<BLOCKSIZE><<<1,BLOCKSIZE>>>(tmpx,
tmpy,
tmpz,
tmpass,
tmpx,
tmpy,
tmpz,
tmpass,
n,
numberOfClusters);checkCUDAError("Error during last step reduction");
cudaDeviceSynchronize();checkCUDAError("Error on mid main loop [GPUReduction]");
divideStep<<<1,BLOCKSIZE>>>(d_means.x,
d_means.y,
d_means.z,
tmpx,
tmpy,
tmpz,
tmpass,
numberOfClusters);checkCUDAError("Error during divideStep");
}
cudaDeviceSynchronize();checkCUDAError("Error on bottom main loop [GPUReduction]");
}
cudaFree(tmpx);
cudaFree(tmpy);
cudaFree(tmpz);
cudaFree(tmpass);
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
std::cout<< "PROCESSING TIME: "<< duration << " s" <<'\n';
//Processed data acquisition to color the output image
float* h_best;
h_best = (float*)malloc(h_x.size()*sizeof(float));
cudaMemcpy(h_best,d_data.assignments, h_x.size()*sizeof(float), cudaMemcpyDeviceToHost);
float* finalmeanx;
float* finalmeany;
float* finalmeanz;
finalmeanx = (float*)malloc(numberOfClusters*sizeof(float));
finalmeany = (float*)malloc(numberOfClusters*sizeof(float));
finalmeanz = (float*)malloc(numberOfClusters*sizeof(float));
cudaMemcpy(finalmeanx, d_means.x, numberOfClusters*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(finalmeany, d_means.y, numberOfClusters*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(finalmeanz, d_means.z, numberOfClusters*sizeof(float),cudaMemcpyDeviceToHost);
std::vector<int> clustColorR(numberOfClusters);
std::vector<int> clustColorG(numberOfClusters);
std::vector<int> clustColorB(numberOfClusters);
for (int cluster = 0; cluster < numberOfClusters; cluster++){
clustColorR[cluster]=(int)finalmeanx[cluster];
clustColorG[cluster]=(int)finalmeany[cluster];
clustColorB[cluster]=(int)finalmeanz[cluster];
}
int* assignedPixels;
assignedPixels = (int*)malloc(number_of_elements*sizeof(int));
for(int i=0; i<number_of_elements; i++){
assignedPixels[i]=(int)h_best[i];
}
handler.disp(assignedPixels, clustColorR, clustColorG, clustColorB);
}
|
e3808fb368b194b1aaf29a3697ac433f12333a68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// gsl-lite is based on GSL: Guidelines Support Library.
// For more information see https://github.com/martinmoene/gsl-lite
//
// Copyright (c) 2015 Martin Moene
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "../gsl-lite.t.hpp"
#include <memory>
__global__ void preconditionAssertionKernel( int i, int j )
{
gsl_Expects( i >= 0 );
gsl_ExpectsAudit( i < j );
gsl_Ensures( i >= 0 );
gsl_EnsuresAudit( i < j );
gsl_Assert( i >= 0 );
gsl_AssertAudit( i < j );
}
CASE( "CUDA: Precondition/postcondition checks and assertions can be used in kernel code" )
{
hipLaunchKernelGGL(( preconditionAssertionKernel), dim3(1), dim3(1), 0, 0, 0, 1 );
// TODO: check for failure
}
__global__ void failFastKernel()
{
gsl_FailFast();
}
CASE( "CUDA: gsl_FailFast() can be used in kernel code" )
{
hipLaunchKernelGGL(( failFastKernel), dim3(1), dim3(1), 0, 0, );
// TODO: check for failure
}
__global__ void spanKernel( gsl::span< int > span )
{
int* data = span.data();
gsl_CONFIG_SPAN_INDEX_TYPE size = span.size();
if (size > 0)
{
span[ 0 ] = 42;
at( span, 0 ) = 42;
}
// TODO: add more tests
}
CASE( "CUDA: span<> can be used in kernel code" )
{
hipLaunchKernelGGL(( spanKernel), dim3(1), dim3(1), 0, 0, gsl::span< int >( ) );
// TODO: check for failure
// TODO: add more tests
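    // A possible extension (hypothetical device-memory variant):
    //   int* d = nullptr; hipMalloc(&d, 4 * sizeof(int));
    //   hipLaunchKernelGGL(( spanKernel), dim3(1), dim3(1), 0, 0, gsl::span< int >( d, 4 ) );
    //   hipFree(d);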
}
__global__ void notNullKernel( gsl::not_null< int* > ptr )
{
// TODO: add more tests
}
CASE( "CUDA: not_null<> can be used in kernel code" )
{
// TODO: run kernel
// TODO: check for failure
// TODO: add more tests
}
| e3808fb368b194b1aaf29a3697ac433f12333a68.cu | //
// gsl-lite is based on GSL: Guidelines Support Library.
// For more information see https://github.com/martinmoene/gsl-lite
//
// Copyright (c) 2015 Martin Moene
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "../gsl-lite.t.hpp"
#include <memory>
__global__ void preconditionAssertionKernel( int i, int j )
{
gsl_Expects( i >= 0 );
gsl_ExpectsAudit( i < j );
gsl_Ensures( i >= 0 );
gsl_EnsuresAudit( i < j );
gsl_Assert( i >= 0 );
gsl_AssertAudit( i < j );
}
CASE( "CUDA: Precondition/postcondition checks and assertions can be used in kernel code" )
{
preconditionAssertionKernel<<<1, 1>>>( 0, 1 );
// TODO: check for failure
}
__global__ void failFastKernel()
{
gsl_FailFast();
}
CASE( "CUDA: gsl_FailFast() can be used in kernel code" )
{
failFastKernel<<<1, 1>>>();
// TODO: check for failure
}
__global__ void spanKernel( gsl::span< int > span )
{
int* data = span.data();
gsl_CONFIG_SPAN_INDEX_TYPE size = span.size();
if (size > 0)
{
span[ 0 ] = 42;
at( span, 0 ) = 42;
}
// TODO: add more tests
}
CASE( "CUDA: span<> can be used in kernel code" )
{
spanKernel<<<1, 1>>>( gsl::span< int >( ) );
// TODO: check for failure
// TODO: add more tests
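    // A possible extension (hypothetical device-memory variant):
    //   int* d = nullptr; cudaMalloc(&d, 4 * sizeof(int));
    //   spanKernel<<<1, 1>>>( gsl::span< int >( d, 4 ) );
    //   cudaFree(d);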
}
__global__ void notNullKernel( gsl::not_null< int* > ptr )
{
// TODO: add more tests
}
CASE( "CUDA: not_null<> can be used in kernel code" )
{
// TODO: run kernel
// TODO: check for failure
// TODO: add more tests
}
|
252e335e1d1dc46761948752fd9170f46acdc8fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
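// For each (matrix size, block shape) pair the loop below pads the sizes so the
// grid divides evenly, performs 10 warm-up launches, then times 1000 launches of
// `add` and prints [total_microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. The timed
// loop is not followed by a device synchronization, so the figure mainly
// reflects kernel launch/enqueue cost rather than complete execution time.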
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *sum = NULL;
hipMalloc(&sum, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
add), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,sum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
add), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
add), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 252e335e1d1dc46761948752fd9170f46acdc8fe.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
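// For each (matrix size, block shape) pair the loop below pads the sizes so the
// grid divides evenly, performs 10 warm-up launches, then times 1000 launches of
// `add` and prints [total_microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. The timed
// loop is not followed by a device synchronization, so the figure mainly
// reflects kernel launch/enqueue cost rather than complete execution time.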
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *sum = NULL;
cudaMalloc(&sum, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
add<<<gridBlock,threadBlock>>>(a,b,sum);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
add<<<gridBlock,threadBlock>>>(a,b,sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
add<<<gridBlock,threadBlock>>>(a,b,sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
60e615584dc7689b04dd88215957df49f8bd7e9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "parameter.h"
#include "array_definition.h"
#include "cuda_funclist.h"
// advectbyzx
__global__ void advectbyzx1_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp);
__global__ void advectbyzx2_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp);
__global__ void advectbyzx1b_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp);
__global__ void advectbyzx2b_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp);
void cuda_advectbyzx(float *adv_u, float *adv_b, int *adv_nx, int *adv_ny, int *adv_nz, float *adv_dt, float *adv_temp, int *h_adv_nx, int *h_adv_ny, int *h_adv_nz)
{
// send it to device to calculate
dim3 dimGrid(*h_adv_ny,*h_adv_nz);
dim3 dimBlock(*h_adv_nx);hipLaunchKernelGGL((
advectbyzx1_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);hipLaunchKernelGGL((
advectbyzx1b_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);hipLaunchKernelGGL((
advectbyzx2_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);hipLaunchKernelGGL((
advectbyzx2b_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);
//
hipDeviceSynchronize();
//
checkCUDAError("kernel execution in cuda_advectbyzx");
//
}
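// The four launches above split the update into two passes per direction:
// advectbyzx1/advectbyzx2 compute the TVD fluxes along x and update b within the
// current (j,k) slice, saving the left-neighbour flux in adv_temp, while the
// corresponding *b kernels add that saved flux to bx of the j-1 (respectively
// k-1) slice, an update that cannot be done race-free inside the first kernel.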
//
__global__ void advectbyzx1_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
//
__shared__ float adv1_s_u[5*BLOCK_SIZE];
__shared__ float adv1_s_u_jm[5*BLOCK_SIZE];
__shared__ float adv1_s_b[3*BLOCK_SIZE];
//
int adv1_jm;
adv1_jm=(blockIdx.x+(*adv1_ny)-1)%(*adv1_ny);
//
for (int ii=0; ii<5; ii++)
{
adv1_s_u[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv1_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
for (int ii=0; ii<5; ii++)
{
adv1_s_u_jm[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv1_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,adv1_jm,blockIdx.y)];
}
for (int ii=0; ii<3; ii++)
{
adv1_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)]=adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
__syncthreads();
//
float vx;
vx=(adv1_s_u_jm[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]+adv1_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)])/(adv1_s_u_jm[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]+adv1_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
//
int adv1_imm,adv1_imp;
adv1_imm=(threadIdx.x+(*adv1_nx)-1)%(*adv1_nx);
adv1_imp=(threadIdx.x+1)%(*adv1_nx);
//
__shared__ float adv1_s_tmp1[BLOCK_SIZE];
adv1_s_tmp1[threadIdx.x]=vx;
__syncthreads();
//
vx=(adv1_s_tmp1[adv1_imm]+adv1_s_tmp1[adv1_imp]+2.0*adv1_s_tmp1[threadIdx.x])/4.0;
//
float b1x;
b1x=adv1_s_b[a2D_FinC(3,blockDim.x,(2-1),threadIdx.x)];
//
// first tvdb
float vg;
vg=vx;
float b;
b=b1x;
__shared__ float adv1_s_vg[BLOCK_SIZE];
adv1_s_vg[threadIdx.x]=vx;
__syncthreads();
//
float vh;
vh=(adv1_s_vg[threadIdx.x]+adv1_s_vg[adv1_imp])/2.0;
//
__shared__ float adv1_s_tmp2[BLOCK_SIZE];
adv1_s_tmp2[threadIdx.x]=b*vg;
__syncthreads();
float flux1;
if (vh>0) flux1=b*vg;
else flux1=adv1_s_tmp2[adv1_imp];
adv1_s_tmp1[threadIdx.x]=flux1;
__syncthreads();
float b1;
b1=b-(flux1-adv1_s_tmp1[adv1_imm])*(*adv1_dt)/2.0;
//
int ip;
int ipp;
int im;
ip=(threadIdx.x+1)%(*adv1_nx);
ipp=(ip+1)%(*adv1_nx);
im=(threadIdx.x+(*adv1_nx)-1)%(*adv1_nx);
//
float v;
v=vh;
float w;
float wp;
float wm;
__shared__ float adv1_s_b1_tvdb[BLOCK_SIZE];
adv1_s_b1_tvdb[threadIdx.x]=b1;
__syncthreads();
if (v>0)
{
w=adv1_s_vg[threadIdx.x]*adv1_s_b1_tvdb[threadIdx.x];
wp=(adv1_s_vg[ip]*adv1_s_b1_tvdb[ip]-w)/2.0;
wm=(w-adv1_s_vg[im]*adv1_s_b1_tvdb[im])/2.0;
}
else
{
w=adv1_s_vg[ip]*adv1_s_b1_tvdb[ip];
wp=(w-adv1_s_vg[ipp]*adv1_s_b1_tvdb[ipp])/2.0;
wm=(adv1_s_vg[threadIdx.x]*adv1_s_b1_tvdb[threadIdx.x]-w)/2.0;
}
float dw;
dw=0.0;
//
if (wm*wp>0) dw=2.0*wm*wp/(wm+wp);
float flux;
flux=(w+dw)*(*adv1_dt);
//
adv1_s_tmp2[threadIdx.x]=flux;
__syncthreads();
b=b-(flux-adv1_s_tmp2[adv1_imm]);
// finished tvdb
//
adv1_s_b[a2D_FinC(3,blockDim.x,(2-1),threadIdx.x)]=b;
adv1_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]=adv1_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]-adv1_s_tmp2[adv1_imm];
//
// send it back to global
for (int ii=0; ii<3; ii++)
{
adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)]=adv1_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)];
}
adv1_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)]=adv1_s_tmp2[adv1_imm];
//
return;
}
__global__ void advectbyzx1b_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
int adv1_jm;
adv1_jm=(blockIdx.x+(*adv1_ny)-1)%(*adv1_ny);
//
adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,adv1_jm,blockIdx.y)]=adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,adv1_jm,blockIdx.y)]+adv1_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)];
//
return;
}
__global__ void advectbyzx2_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
//
__shared__ float adv2_s_u[5*BLOCK_SIZE];
__shared__ float adv2_s_u_km[5*BLOCK_SIZE];
__shared__ float adv2_s_b[3*BLOCK_SIZE];
//
int adv2_km;
adv2_km=(blockIdx.y+(*adv2_nz)-1)%(*adv2_nz);
//
for (int ii=0; ii<5; ii++)
{
adv2_s_u[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv2_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
for (int ii=0; ii<5; ii++)
{
adv2_s_u_km[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv2_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,adv2_km,blockIdx.y)];
}
for (int ii=0; ii<3; ii++)
{
adv2_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)]=adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
__syncthreads();
//
float vx;
vx=(adv2_s_u_km[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]+adv2_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)])/(adv2_s_u_km[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]+adv2_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
//
int adv2_imm,adv2_imp;
adv2_imm=(threadIdx.x+(*adv2_nx)-1)%(*adv2_nx);
adv2_imp=(threadIdx.x+1)%(*adv2_nx);
//
__shared__ float adv2_s_tmp1[BLOCK_SIZE];
adv2_s_tmp1[threadIdx.x]=vx;
__syncthreads();
//
vx=(adv2_s_tmp1[adv2_imm]+adv2_s_tmp1[adv2_imp]+2.0*adv2_s_tmp1[threadIdx.x])/4.0;
//
float b1x;
b1x=adv2_s_b[a2D_FinC(3,blockDim.x,(3-1),threadIdx.x)];
//
// second tvdb
float vg;
vg=vx;
float b;
b=b1x;
__shared__ float adv2_s_vg[BLOCK_SIZE];
adv2_s_vg[threadIdx.x]=vx;
__syncthreads();
//
float vh;
vh=(adv2_s_vg[threadIdx.x]+adv2_s_vg[adv2_imp])/2.0;
//
__shared__ float adv2_s_tmp2[BLOCK_SIZE];
adv2_s_tmp2[threadIdx.x]=b*vg;
__syncthreads();
float flux1;
if (vh>0) flux1=b*vg;
else flux1=adv2_s_tmp2[adv2_imp];
adv2_s_tmp1[threadIdx.x]=flux1;
__syncthreads();
float b1;
b1=b-(flux1-adv2_s_tmp1[adv2_imm])*(*adv2_dt)/2.0;
//
int ip;
int ipp;
int im;
ip=(threadIdx.x+1)%(*adv2_nx);
ipp=(ip+1)%(*adv2_nx);
im=(threadIdx.x+(*adv2_nx)-1)%(*adv2_nx);
//
float v;
v=vh;
float w;
float wp;
float wm;
__shared__ float adv2_s_b1_tvdb[BLOCK_SIZE];
adv2_s_b1_tvdb[threadIdx.x]=b1;
__syncthreads();
if (v>0)
{
w=adv2_s_vg[threadIdx.x]*adv2_s_b1_tvdb[threadIdx.x];
wp=(adv2_s_vg[ip]*adv2_s_b1_tvdb[ip]-w)/2.0;
wm=(w-adv2_s_vg[im]*adv2_s_b1_tvdb[im])/2.0;
}
else
{
w=adv2_s_vg[ip]*adv2_s_b1_tvdb[ip];
wp=(w-adv2_s_vg[ipp]*adv2_s_b1_tvdb[ipp])/2.0;
wm=(adv2_s_vg[threadIdx.x]*adv2_s_b1_tvdb[threadIdx.x]-w)/2.0;
}
float dw;
dw=0.0;
//
if (wm*wp>0) dw=2.0*wm*wp/(wm+wp);
float flux;
flux=(w+dw)*(*adv2_dt);
//
adv2_s_tmp2[threadIdx.x]=flux;
__syncthreads();
b=b-(flux-adv2_s_tmp2[adv2_imm]);
// finished tvdb
adv2_s_b[a2D_FinC(3,blockDim.x,(3-1),threadIdx.x)]=b;
adv2_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]=adv2_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]-adv2_s_tmp2[adv2_imm];
for (int ii=0; ii<3; ii++)
{
adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)]=adv2_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)];
}
adv2_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)]=adv2_s_tmp2[adv2_imm];
//
return;
}
__global__ void advectbyzx2b_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
//
int adv2_km;
adv2_km=(blockIdx.y+(*adv2_nz)-1)%(*adv2_nz);
//
adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,blockIdx.x,adv2_km)]=adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,blockIdx.x,adv2_km)]+adv2_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)];
//
return;
}
| 60e615584dc7689b04dd88215957df49f8bd7e9a.cu | #include <stdio.h>
#include <math.h>
#include "cuda.h"
#include "parameter.h"
#include "array_definition.h"
#include "cuda_funclist.h"
// advectbyzx
__global__ void advectbyzx1_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp);
__global__ void advectbyzx2_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp);
__global__ void advectbyzx1b_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp);
__global__ void advectbyzx2b_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp);
void cuda_advectbyzx(float *adv_u, float *adv_b, int *adv_nx, int *adv_ny, int *adv_nz, float *adv_dt, float *adv_temp, int *h_adv_nx, int *h_adv_ny, int *h_adv_nz)
{
// send it to device to calculate
dim3 dimGrid(*h_adv_ny,*h_adv_nz);
dim3 dimBlock(*h_adv_nx);
advectbyzx1_kernel<<< dimGrid, dimBlock >>>(adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);
advectbyzx1b_kernel<<< dimGrid, dimBlock >>>(adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);
advectbyzx2_kernel<<< dimGrid, dimBlock >>>(adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);
advectbyzx2b_kernel<<< dimGrid, dimBlock >>>(adv_u,adv_b,adv_nx,adv_ny,adv_nz,adv_dt,adv_temp);
//
cudaThreadSynchronize();
//
checkCUDAError("kernel execution in cuda_advectbyzx");
//
}
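// The four launches above split the update into two passes per direction:
// advectbyzx1/advectbyzx2 compute the TVD fluxes along x and update b within the
// current (j,k) slice, saving the left-neighbour flux in adv_temp, while the
// corresponding *b kernels add that saved flux to bx of the j-1 (respectively
// k-1) slice, an update that cannot be done race-free inside the first kernel.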
//
__global__ void advectbyzx1_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
//
__shared__ float adv1_s_u[5*BLOCK_SIZE];
__shared__ float adv1_s_u_jm[5*BLOCK_SIZE];
__shared__ float adv1_s_b[3*BLOCK_SIZE];
//
int adv1_jm;
adv1_jm=(blockIdx.x+(*adv1_ny)-1)%(*adv1_ny);
//
for (int ii=0; ii<5; ii++)
{
adv1_s_u[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv1_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
for (int ii=0; ii<5; ii++)
{
adv1_s_u_jm[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv1_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,adv1_jm,blockIdx.y)];
}
for (int ii=0; ii<3; ii++)
{
adv1_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)]=adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
__syncthreads();
//
float vx;
vx=(adv1_s_u_jm[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]+adv1_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)])/(adv1_s_u_jm[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]+adv1_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
//
int adv1_imm,adv1_imp;
adv1_imm=(threadIdx.x+(*adv1_nx)-1)%(*adv1_nx);
adv1_imp=(threadIdx.x+1)%(*adv1_nx);
//
__shared__ float adv1_s_tmp1[BLOCK_SIZE];
adv1_s_tmp1[threadIdx.x]=vx;
__syncthreads();
//
vx=(adv1_s_tmp1[adv1_imm]+adv1_s_tmp1[adv1_imp]+2.0*adv1_s_tmp1[threadIdx.x])/4.0;
//
float b1x;
b1x=adv1_s_b[a2D_FinC(3,blockDim.x,(2-1),threadIdx.x)];
//
// first tvdb
float vg;
vg=vx;
float b;
b=b1x;
__shared__ float adv1_s_vg[BLOCK_SIZE];
adv1_s_vg[threadIdx.x]=vx;
__syncthreads();
//
float vh;
vh=(adv1_s_vg[threadIdx.x]+adv1_s_vg[adv1_imp])/2.0;
//
__shared__ float adv1_s_tmp2[BLOCK_SIZE];
adv1_s_tmp2[threadIdx.x]=b*vg;
__syncthreads();
float flux1;
if (vh>0) flux1=b*vg;
else flux1=adv1_s_tmp2[adv1_imp];
adv1_s_tmp1[threadIdx.x]=flux1;
__syncthreads();
float b1;
b1=b-(flux1-adv1_s_tmp1[adv1_imm])*(*adv1_dt)/2.0;
//
int ip;
int ipp;
int im;
ip=(threadIdx.x+1)%(*adv1_nx);
ipp=(ip+1)%(*adv1_nx);
im=(threadIdx.x+(*adv1_nx)-1)%(*adv1_nx);
//
float v;
v=vh;
float w;
float wp;
float wm;
__shared__ float adv1_s_b1_tvdb[BLOCK_SIZE];
adv1_s_b1_tvdb[threadIdx.x]=b1;
__syncthreads();
if (v>0)
{
w=adv1_s_vg[threadIdx.x]*adv1_s_b1_tvdb[threadIdx.x];
wp=(adv1_s_vg[ip]*adv1_s_b1_tvdb[ip]-w)/2.0;
wm=(w-adv1_s_vg[im]*adv1_s_b1_tvdb[im])/2.0;
}
else
{
w=adv1_s_vg[ip]*adv1_s_b1_tvdb[ip];
wp=(w-adv1_s_vg[ipp]*adv1_s_b1_tvdb[ipp])/2.0;
wm=(adv1_s_vg[threadIdx.x]*adv1_s_b1_tvdb[threadIdx.x]-w)/2.0;
}
float dw;
dw=0.0;
//
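// Harmonic-mean (van Leer-type) limiter: dw is nonzero only when the one-sided
// differences wm and wp have the same sign, e.g. wm=0.2, wp=0.4 gives
// dw = 2*0.2*0.4/(0.2+0.4) ~= 0.267 (worked check, not from the original source).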
if (wm*wp>0) dw=2.0*wm*wp/(wm+wp);
float flux;
flux=(w+dw)*(*adv1_dt);
//
adv1_s_tmp2[threadIdx.x]=flux;
__syncthreads();
b=b-(flux-adv1_s_tmp2[adv1_imm]);
// finished tvdb
//
adv1_s_b[a2D_FinC(3,blockDim.x,(2-1),threadIdx.x)]=b;
adv1_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]=adv1_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]-adv1_s_tmp2[adv1_imm];
//
// send it back to global
for (int ii=0; ii<3; ii++)
{
adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)]=adv1_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)];
}
adv1_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)]=adv1_s_tmp2[adv1_imm];
//
return;
}
__global__ void advectbyzx1b_kernel(float *adv1_u, float *adv1_b, int *adv1_nx, int *adv1_ny, int *adv1_nz, float *adv1_dt, float *adv1_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
int adv1_jm;
adv1_jm=(blockIdx.x+(*adv1_ny)-1)%(*adv1_ny);
//
adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,adv1_jm,blockIdx.y)]=adv1_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,adv1_jm,blockIdx.y)]+adv1_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)];
//
return;
}
__global__ void advectbyzx2_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
//
__shared__ float adv2_s_u[5*BLOCK_SIZE];
__shared__ float adv2_s_u_km[5*BLOCK_SIZE];
__shared__ float adv2_s_b[3*BLOCK_SIZE];
//
int adv2_km;
adv2_km=(blockIdx.y+(*adv2_nz)-1)%(*adv2_nz);
//
for (int ii=0; ii<5; ii++)
{
adv2_s_u[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv2_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
for (int ii=0; ii<5; ii++)
{
adv2_s_u_km[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=adv2_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,adv2_km)]; // k-1 (z) neighbour, matching the adv2_km update in advectbyzx2b_kernel
}
for (int ii=0; ii<3; ii++)
{
adv2_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)]=adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
__syncthreads();
//
float vx;
vx=(adv2_s_u_km[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]+adv2_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)])/(adv2_s_u_km[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]+adv2_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
//
int adv2_imm,adv2_imp;
adv2_imm=(threadIdx.x+(*adv2_nx)-1)%(*adv2_nx);
adv2_imp=(threadIdx.x+1)%(*adv2_nx);
//
__shared__ float adv2_s_tmp1[BLOCK_SIZE];
adv2_s_tmp1[threadIdx.x]=vx;
__syncthreads();
//
vx=(adv2_s_tmp1[adv2_imm]+adv2_s_tmp1[adv2_imp]+2.0*adv2_s_tmp1[threadIdx.x])/4.0;
//
float b1x;
b1x=adv2_s_b[a2D_FinC(3,blockDim.x,(3-1),threadIdx.x)];
//
// second tvdb
float vg;
vg=vx;
float b;
b=b1x;
__shared__ float adv2_s_vg[BLOCK_SIZE];
adv2_s_vg[threadIdx.x]=vx;
__syncthreads();
//
float vh;
vh=(adv2_s_vg[threadIdx.x]+adv2_s_vg[adv2_imp])/2.0;
//
__shared__ float adv2_s_tmp2[BLOCK_SIZE];
adv2_s_tmp2[threadIdx.x]=b*vg;
__syncthreads();
float flux1;
if (vh>0) flux1=b*vg;
else flux1=adv2_s_tmp2[adv2_imp];
adv2_s_tmp1[threadIdx.x]=flux1;
__syncthreads();
float b1;
b1=b-(flux1-adv2_s_tmp1[adv2_imm])*(*adv2_dt)/2.0;
//
int ip;
int ipp;
int im;
ip=(threadIdx.x+1)%(*adv2_nx);
ipp=(ip+1)%(*adv2_nx);
im=(threadIdx.x+(*adv2_nx)-1)%(*adv2_nx);
//
float v;
v=vh;
float w;
float wp;
float wm;
__shared__ float adv2_s_b1_tvdb[BLOCK_SIZE];
adv2_s_b1_tvdb[threadIdx.x]=b1;
__syncthreads();
if (v>0)
{
w=adv2_s_vg[threadIdx.x]*adv2_s_b1_tvdb[threadIdx.x];
wp=(adv2_s_vg[ip]*adv2_s_b1_tvdb[ip]-w)/2.0;
wm=(w-adv2_s_vg[im]*adv2_s_b1_tvdb[im])/2.0;
}
else
{
w=adv2_s_vg[ip]*adv2_s_b1_tvdb[ip];
wp=(w-adv2_s_vg[ipp]*adv2_s_b1_tvdb[ipp])/2.0;
wm=(adv2_s_vg[threadIdx.x]*adv2_s_b1_tvdb[threadIdx.x]-w)/2.0;
}
float dw;
dw=0.0;
//
if (wm*wp>0) dw=2.0*wm*wp/(wm+wp);
float flux;
flux=(w+dw)*(*adv2_dt);
//
adv2_s_tmp2[threadIdx.x]=flux;
__syncthreads();
b=b-(flux-adv2_s_tmp2[adv2_imm]);
// finished tvdb
adv2_s_b[a2D_FinC(3,blockDim.x,(3-1),threadIdx.x)]=b;
adv2_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]=adv2_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]-adv2_s_tmp2[adv2_imm];
for (int ii=0; ii<3; ii++)
{
adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)]=adv2_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)];
}
adv2_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)]=adv2_s_tmp2[adv2_imm];
//
return;
}
__global__ void advectbyzx2b_kernel(float *adv2_u, float *adv2_b, int *adv2_nx, int *adv2_ny, int *adv2_nz, float *adv2_dt, float *adv2_temp)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
//
int adv2_km;
adv2_km=(blockIdx.y+(*adv2_nz)-1)%(*adv2_nz);
//
adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,blockIdx.x,adv2_km)]=adv2_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),threadIdx.x,blockIdx.x,adv2_km)]+adv2_temp[a3D_FinC(blockDim.x,gridDim.x,gridDim.y,threadIdx.x,blockIdx.x,blockIdx.y)];
//
return;
}
|
pad_impl.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include "kernel/gpu/cuda_impl/pad_impl.cuh"
template <typename T>
__global__ void Pad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T* output) {
T pad_value_ = static_cast<T>(pad_value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / padded_width / padded_height;
const int padded_w = pos % padded_width;
const int padded_h = pos / padded_width % padded_height;
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width) {
output[pos] = pad_value_;
} else {
output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left];
}
}
return;
}
template <typename T>
__global__ void PadGrad(const size_t size, const T* dy, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / old_width / old_height;
const int padded_w = pos % old_width + pad_left;
const int padded_h = pos / old_width % old_height + pad_top;
dx[pos] = dy[(block_num * padded_height + padded_h) * padded_width + padded_w];
}
return;
}
template <typename T>
void CalPad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top, const int pad_left,
const float pad_value, T* output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( Pad), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value,
output);
return;
}
template <typename T>
void CalPadGrad(const size_t size, const T* dy, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( PadGrad), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dy, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, dx);
return;
}
template void CalPad<float>(const size_t size, const float* input, const int num, const int channels,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, float* output,
hipStream_t cuda_stream);
template void CalPadGrad<float>(const size_t size, const float* dy, const int num, const int channels,
const int old_height, const int old_width, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, float* dx,
hipStream_t cuda_stream);
template void CalPad<half>(const size_t size, const half* input, const int num, const int channels,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, half* output,
hipStream_t cuda_stream);
template void CalPadGrad<half>(const size_t size, const half* dy, const int num, const int channels,
const int old_height, const int old_width, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, half* dx,
hipStream_t cuda_stream);
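// Illustrative usage sketch (not part of the original kernel source): pads a single
// num=1, channels=1, 4x4 float image with one row/column of pad_value on every side,
// producing a 6x6 output. d_in/d_out are assumed to be device buffers of 16 and 36
// floats respectively; all ex_* names are hypothetical.
void CalPadExample(const float *d_in, float *d_out, hipStream_t stream) {
  const int ex_num = 1, ex_channels = 1;
  const int ex_old_h = 4, ex_old_w = 4;
  const int ex_padded_h = 6, ex_padded_w = 6;  // old size plus a pad of 1 on each side
  const size_t ex_out_size = ex_num * ex_channels * ex_padded_h * ex_padded_w;
  CalPad<float>(ex_out_size, d_in, ex_num, ex_channels, ex_old_h, ex_old_w,
                ex_padded_h, ex_padded_w, /*pad_top=*/1, /*pad_left=*/1,
                /*pad_value=*/0.0f, d_out, stream);
}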
| pad_impl.cu | /**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include "kernel/gpu/cuda_impl/pad_impl.cuh"
template <typename T>
__global__ void Pad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T* output) {
T pad_value_ = static_cast<T>(pad_value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / padded_width / padded_height;
const int padded_w = pos % padded_width;
const int padded_h = pos / padded_width % padded_height;
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width) {
output[pos] = pad_value_;
} else {
output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left];
}
}
return;
}
template <typename T>
__global__ void PadGrad(const size_t size, const T* dy, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / old_width / old_height;
const int padded_w = pos % old_width + pad_left;
const int padded_h = pos / old_width % old_height + pad_top;
dx[pos] = dy[(block_num * padded_height + padded_h) * padded_width + padded_w];
}
return;
}
template <typename T>
void CalPad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top, const int pad_left,
const float pad_value, T* output, cudaStream_t cuda_stream) {
Pad<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value,
output);
return;
}
template <typename T>
void CalPadGrad(const size_t size, const T* dy, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx, cudaStream_t cuda_stream) {
PadGrad<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dy, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, dx);
return;
}
template void CalPad<float>(const size_t size, const float* input, const int num, const int channels,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, float* output,
cudaStream_t cuda_stream);
template void CalPadGrad<float>(const size_t size, const float* dy, const int num, const int channels,
const int old_height, const int old_width, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, float* dx,
cudaStream_t cuda_stream);
template void CalPad<half>(const size_t size, const half* input, const int num, const int channels,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, half* output,
cudaStream_t cuda_stream);
template void CalPadGrad<half>(const size_t size, const half* dy, const int num, const int channels,
const int old_height, const int old_width, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, half* dx,
cudaStream_t cuda_stream);
|
75ebcc4026a06b16a980d930b1b3320d808291f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
// file: altis\src\cuda\level2\particlefilter\ex_particle_CUDA_float_seq.cu
//
// summary: particle filter (float, sequential) CUDA benchmark implementation
//
// origin: Rodinia (http://rodinia.cs.virginia.edu/doku.php)
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
bool verbose = false;
bool quiet = false;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
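// Together these constants define the linear congruential recurrence used by
// randu()/d_randu() below: seed[i] <- (A*seed[i] + C) mod M, scaled into [0, 1)
// by dividing by M.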
double get_wall_time(){
struct timeval time;
if (gettimeofday(&time,NULL)){
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
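// Worked check (not in the original source): with the foreground/background
// intensities 228 and 100 written by videoSequence(), a pixel at I=228 contributes
// ((228-100)^2 - 0)/50 = 327.68 while a pixel at I=100 contributes
// (0 - (100-228)^2)/50 = -327.68, so particles covering the object accumulate a
// large positive likelihood sum.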
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomily generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
double test_randn(int * seed, int index) {
//Box-Muller algortihm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algortihm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
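// e.g. dev_round_double(2.3) -> 2.0 and dev_round_double(2.7) -> 3.0 for the
// non-negative particle positions used here (round-half-up to the nearest integer).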
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//threads with i >= Nparticles contribute the zero written to buffer above, so a partially-full last block still reduces correctly (blockDim.x = 512 is a power of two)
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius) {
disk[x * diameter + y] = 1;
} else {
disk[x * diameter + y] = 0;
}
}
}
}
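// Quick count (not in the original source): the radius-5 disk requested by
// particleFilter() is a 9x9 mask containing 69 ones (cells whose centre lies
// strictly inside the radius).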
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
 * The implementation of the particle filter using CUDA kernels for many frames
 * @note This function is designed to work with a video of several frames; the likelihood, sum, weight-normalization and index-finding steps each run as a separate kernel on the device for every frame
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, ResultDatabase &resultDB) {
float kernelTime = 0.0f;
float transferTime = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
CUDA_SAFE_CALL(hipMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
CUDA_SAFE_CALL(hipMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
CUDA_SAFE_CALL(hipMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
CUDA_SAFE_CALL(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
//start send
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
double wall1 = get_wall_time();
for (k = 1; k < Nfr; k++) {
hipEventRecord(start, 0);
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU,
arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU,
likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles,
countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
}//end loop
//block till kernels are finished
hipDeviceSynchronize();
double wall2 = get_wall_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
if(verbose && !quiet) {
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
}
char atts[1024];
sprintf(atts, "dimx:%d, dimy:%d, numframes:%d, numparticles:%d", IszX, IszY, Nfr, Nparticles);
resultDB.AddResult("particlefilter_float_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("particlefilter_float_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("particlefilter_float_total_time", atts, "sec", kernelTime+transferTime);
resultDB.AddResult("particlefilter_float_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
/**
 * The implementation of the particle filter using a CUDA Graph for many frames
 * @note This function is designed to work with a video of several frames; the four per-frame kernels are recorded once as graph nodes and the instantiated graph is launched for each frame
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilterGraph(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, ResultDatabase &resultDB) {
float kernelTime = 0.0f;
float transferTime = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
CUDA_SAFE_CALL(hipMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
CUDA_SAFE_CALL(hipMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
CUDA_SAFE_CALL(hipMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
CUDA_SAFE_CALL(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
CUDA_SAFE_CALL(hipMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
//start send
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
// Init graph metadata
hipStream_t streamForGraph;
hipGraph_t graph;
hipGraphNode_t likelihoodKernelNode, sumKernelNode, normalizeWeightsKernelNode, findIndexKernelNode;
checkCudaErrors(hipGraphCreate(&graph, 0));
checkCudaErrors(hipStreamCreate(&streamForGraph));
// Set up first kernel node
hipKernelNodeParams likelihoodKernelNodeParams = {0};
void *likelihoodKernelArgs[19] = {(void *)&arrayX_GPU, (void *)&arrayY_GPU,
(void *)&xj_GPU, (void *)&yj_GPU,
(void *)&CDF_GPU, (void *)&ind_GPU,
(void *)&objxy_GPU, (void *)&likelihood_GPU,
(void *)&I_GPU, (void *)&u_GPU,
(void *)&weights_GPU, &Nparticles,
&countOnes, &max_size, &k, &IszY,
&Nfr, (void *)&seed_GPU, (void *)&partial_sums};
likelihoodKernelNodeParams.func = (void *)likelihood_kernel;
likelihoodKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
likelihoodKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
likelihoodKernelNodeParams.sharedMemBytes = 0;
likelihoodKernelNodeParams.kernelParams = (void **)likelihoodKernelArgs;
likelihoodKernelNodeParams.extra = NULL;
checkCudaErrors(hipGraphAddKernelNode(&likelihoodKernelNode, graph, NULL, 0, &likelihoodKernelNodeParams));
// Set up the second kernel node
hipKernelNodeParams sumKernelNodeParams = {0};
void *sumKernelArgs[2] = {(void *)&partial_sums, &Nparticles};
sumKernelNodeParams.func = (void *)sum_kernel;
sumKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
sumKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
sumKernelNodeParams.sharedMemBytes = 0;
sumKernelNodeParams.kernelParams = (void **)sumKernelArgs;
sumKernelNodeParams.extra = NULL;
checkCudaErrors(hipGraphAddKernelNode(&sumKernelNode, graph, NULL, 0, &sumKernelNodeParams));
// set up the third kernel node
hipKernelNodeParams normalizeWeightsKernelNodeParams = {0};
void *normalizeWeightsKernelArgs[6] = {(void *)&weights_GPU, &Nparticles,
(void *)&partial_sums, (void *)&CDF_GPU,
(void *)&u_GPU, (void *)&seed_GPU};
normalizeWeightsKernelNodeParams.func = (void *)normalize_weights_kernel;
normalizeWeightsKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
normalizeWeightsKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
normalizeWeightsKernelNodeParams.sharedMemBytes = 0;
normalizeWeightsKernelNodeParams.kernelParams = (void **)normalizeWeightsKernelArgs;
normalizeWeightsKernelNodeParams.extra = NULL;
checkCudaErrors(hipGraphAddKernelNode(&normalizeWeightsKernelNode, graph, NULL, 0, &normalizeWeightsKernelNodeParams));
// set up the fourth kernel node
hipKernelNodeParams findIndexKernelNodeParams = {0};
void *findIndexKernelArgs[8] = {(void *)&arrayX_GPU, (void *)&arrayY_GPU, (void *)&CDF_GPU,
(void *)&u_GPU, (void *)&xj_GPU,
(void *)&yj_GPU, (void *)&weights_GPU,
&Nparticles};
findIndexKernelNodeParams.func = (void *)find_index_kernel;
findIndexKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
findIndexKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
findIndexKernelNodeParams.sharedMemBytes = 0;
findIndexKernelNodeParams.kernelParams = (void **)findIndexKernelArgs;
findIndexKernelNodeParams.extra = NULL;
checkCudaErrors(hipGraphAddKernelNode(&findIndexKernelNode, graph, NULL, 0, &findIndexKernelNodeParams));
// Add dependencies between each kernels
checkCudaErrors(hipGraphAddDependencies(graph, &likelihoodKernelNode, &sumKernelNode, 1));
checkCudaErrors(hipGraphAddDependencies(graph, &sumKernelNode, &normalizeWeightsKernelNode, 1));
checkCudaErrors(hipGraphAddDependencies(graph, &normalizeWeightsKernelNode, &findIndexKernelNode, 1));
// init the graph
hipGraphExec_t graphExec;
checkCudaErrors(hipGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
double wall1 = get_wall_time();
for (k = 1; k < Nfr; k++) {
checkCudaErrors(hipEventRecord(start, 0));
checkCudaErrors(hipGraphLaunch(graphExec, streamForGraph));
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
kernelTime += elapsedTime * 1.e-3;
}//end loop
//block till kernels are finished
checkCudaErrors(hipStreamSynchronize(streamForGraph));
double wall2 = get_wall_time();
checkCudaErrors(hipGraphExecDestroy(graphExec));
checkCudaErrors(hipGraphDestroy(graph));
checkCudaErrors(hipStreamDestroy(streamForGraph));
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
if(verbose && !quiet) {
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
}
char atts[1024];
sprintf(atts, "dimx:%d, dimy:%d, numframes:%d, numparticles:%d", IszX, IszY, Nfr, Nparticles);
resultDB.AddResult("particlefilter_float_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("particlefilter_float_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("particlefilter_float_total_time", atts, "sec", kernelTime+transferTime);
resultDB.AddResult("particlefilter_float_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
void addBenchmarkSpecOptions(OptionParser &op) {
op.addOption("dimx", OPT_INT, "0", "grid x dimension", 'x');
op.addOption("dimy", OPT_INT, "0", "grid y dimension", 'y');
op.addOption("framecount", OPT_INT, "0", "number of frames to track across", 'f');
op.addOption("np", OPT_INT, "0", "number of particles to use");
op.addOption("graph", OPT_BOOL, "0", "use CUDA Graph implementation");
}
void particlefilter_float(ResultDatabase &resultDB, int args[], bool useGraph);
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running ParticleFilter (float)\n");
int args[4];
args[0] = op.getOptionInt("dimx");
args[1] = op.getOptionInt("dimy");
args[2] = op.getOptionInt("framecount");
args[3] = op.getOptionInt("np");
bool preset = false;
verbose = op.getOptionBool("verbose");
quiet = op.getOptionBool("quiet");
bool useGraph = op.getOptionBool("graph");
for(int i = 0; i < 4; i++) {
if(args[i] <= 0) {
preset = true;
}
}
if(preset) {
int probSizes[4][4] = {{10, 10, 2, 100},
{40, 40, 5, 500},
{200, 200, 8, 500000},
{500, 500, 15, 1000000}};
int size = op.getOptionInt("size") - 1;
for(int i = 0; i < 4; i++) {
args[i] = probSizes[size][i];
}
}
if(!quiet) {
printf("Using dimx=%d, dimy=%d, framecount=%d, numparticles=%d\n",
args[0], args[1], args[2], args[3]);
}
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
if(!quiet) {
printf("Pass %d: ", i);
}
particlefilter_float(resultDB, args, useGraph);
if(!quiet) {
printf("Done.\n");
}
}
}
void particlefilter_float(ResultDatabase &resultDB, int args[], bool useGraph) {
int IszX, IszY, Nfr, Nparticles;
IszX = args[0];
IszY = args[1];
Nfr = args[2];
Nparticles = args[3];
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
//call particle filter
if (useGraph) particleFilterGraph(I, IszX, IszY, Nfr, seed, Nparticles, resultDB);
else particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, resultDB);
free(seed);
free(I);
}
| 75ebcc4026a06b16a980d930b1b3320d808291f9.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////
// file: altis\src\cuda\level2\particlefilter\ex_particle_CUDA_float_seq.cu
//
// summary: particle filter (float, sequential) CUDA benchmark implementation
//
// origin: Rodinia (http://rodinia.cs.virginia.edu/doku.php)
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
bool verbose = false;
bool quiet = false;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
double get_wall_time(){
struct timeval time;
if (gettimeofday(&time,NULL)){
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomily generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
double test_randn(int * seed, int index) {
//Box-Muller algortihm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algortihm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
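/*****************************
* CUDA Normalize Weights Kernel
* Divides every weight by the total weight (partial_sums[0]); global thread 0 then rebuilds the CDF
* and draws the starting offset u[0] = (1/Nparticles) * randu for systematic resampling, after which
* every particle fills u[i] = u1 + i/Nparticles.
* Note: u[0] is written by global thread 0 and read by all blocks, but __syncthreads() only orders
* threads within a block, so this exchange is not synchronized across blocks.
*****************************/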
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
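/*****************************
* CUDA Sum Kernel
* Global thread 0 serially accumulates the per-block partial sums produced by the likelihood kernel
* into partial_sums[0].
*****************************/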
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: xj
* param4: yj
* param5: CDF
* param6: ind
* param7: objxy
* param8: likelihood
* param9: I
* param10: u
* param11: weights
* param12: Nparticles
* param13: countOnes
* param14: max_size
* param15: k
* param16: IszY
* param17: Nfr
* param18: seed
* param19: partial_sums
*****************************/
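// Each particle i: copies its resampled position (xj, yj) into (arrayX, arrayY), applies the motion
// model with Gaussian noise, gathers the disk-template pixels to compute its likelihood, multiplies
// its weight by exp(likelihood), and finally the block reduces its weights in shared memory into
// partial_sums[blockIdx.x] (the reduction assumes blockDim.x is a power of two, at most 512).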
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//tree reduction of this block's weights; buffer was zero-filled above, so a partially full last block is handled correctly
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return the input rounded down if its fractional part is < .5, otherwise rounded up
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1; //round up; newValue++ would return the unincremented value
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius) {
disk[x * diameter + y] = 1;
} else {
disk[x * diameter + y] = 0;
}
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param posZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using CUDA for many frames
* @note This function is designed to work with a video of several frames
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, ResultDatabase &resultDB) {
float kernelTime = 0.0f;
float transferTime = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
CUDA_SAFE_CALL(cudaMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
CUDA_SAFE_CALL(cudaMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
CUDA_SAFE_CALL(cudaMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
CUDA_SAFE_CALL(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
//start send
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
double wall1 = get_wall_time();
for (k = 1; k < Nfr; k++) {
cudaEventRecord(start, 0);
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU,
arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU,
likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles,
countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
}//end loop
//block till kernels are finished
cudaDeviceSynchronize();
double wall2 = get_wall_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
if(verbose && !quiet) {
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
}
char atts[1024];
sprintf(atts, "dimx:%d, dimy:%d, numframes:%d, numparticles:%d", IszX, IszY, Nfr, Nparticles);
resultDB.AddResult("particlefilter_float_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("particlefilter_float_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("particlefilter_float_total_time", atts, "sec", kernelTime+transferTime);
resultDB.AddResult("particlefilter_float_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
/**
* The implementation of the particle filter using CUDA Graphs for many frames
* @note This function builds the per-frame kernel sequence as a CUDA graph and launches the instantiated graph once per frame
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilterGraph(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, ResultDatabase &resultDB) {
float kernelTime = 0.0f;
float transferTime = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
CUDA_SAFE_CALL(cudaMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
CUDA_SAFE_CALL(cudaMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
CUDA_SAFE_CALL(cudaMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
CUDA_SAFE_CALL(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
CUDA_SAFE_CALL(cudaMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k = 1; //initialized so a defined value is captured when the likelihood kernel node is added to the graph below
//start send
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
// Init graph metadata
cudaStream_t streamForGraph;
cudaGraph_t graph;
cudaGraphNode_t likelihoodKernelNode, sumKernelNode, normalizeWeightsKernelNode, findIndexKernelNode;
checkCudaErrors(cudaGraphCreate(&graph, 0));
checkCudaErrors(cudaStreamCreate(&streamForGraph));
// Set up first kernel node
cudaKernelNodeParams likelihoodKernelNodeParams = {0};
void *likelihoodKernelArgs[19] = {(void *)&arrayX_GPU, (void *)&arrayY_GPU,
(void *)&xj_GPU, (void *)&yj_GPU,
(void *)&CDF_GPU, (void *)&ind_GPU,
(void *)&objxy_GPU, (void *)&likelihood_GPU,
(void *)&I_GPU, (void *)&u_GPU,
(void *)&weights_GPU, &Nparticles,
&countOnes, &max_size, &k, &IszY,
&Nfr, (void *)&seed_GPU, (void *)&partial_sums};
likelihoodKernelNodeParams.func = (void *)likelihood_kernel;
likelihoodKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
likelihoodKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
likelihoodKernelNodeParams.sharedMemBytes = 0;
likelihoodKernelNodeParams.kernelParams = (void **)likelihoodKernelArgs;
likelihoodKernelNodeParams.extra = NULL;
checkCudaErrors(cudaGraphAddKernelNode(&likelihoodKernelNode, graph, NULL, 0, &likelihoodKernelNodeParams));
// Set up the second kernel node
cudaKernelNodeParams sumKernelNodeParams = {0};
void *sumKernelArgs[2] = {(void *)&partial_sums, &Nparticles};
sumKernelNodeParams.func = (void *)sum_kernel;
sumKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
sumKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
sumKernelNodeParams.sharedMemBytes = 0;
sumKernelNodeParams.kernelParams = (void **)sumKernelArgs;
sumKernelNodeParams.extra = NULL;
checkCudaErrors(cudaGraphAddKernelNode(&sumKernelNode, graph, NULL, 0, &sumKernelNodeParams));
// set up the third kernel node
cudaKernelNodeParams normalizeWeightsKernelNodeParams = {0};
void *normalizeWeightsKernelArgs[6] = {(void *)&weights_GPU, &Nparticles,
(void *)&partial_sums, (void *)&CDF_GPU,
(void *)&u_GPU, (void *)&seed_GPU};
normalizeWeightsKernelNodeParams.func = (void *)normalize_weights_kernel;
normalizeWeightsKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
normalizeWeightsKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
normalizeWeightsKernelNodeParams.sharedMemBytes = 0;
normalizeWeightsKernelNodeParams.kernelParams = (void **)normalizeWeightsKernelArgs;
normalizeWeightsKernelNodeParams.extra = NULL;
checkCudaErrors(cudaGraphAddKernelNode(&normalizeWeightsKernelNode, graph, NULL, 0, &normalizeWeightsKernelNodeParams));
// set up the fourth kernel node
cudaKernelNodeParams findIndexKernelNodeParams = {0};
void *findIndexKernelArgs[8] = {(void *)&arrayX_GPU, (void *)&arrayY_GPU, (void *)&CDF_GPU,
(void *)&u_GPU, (void *)&xj_GPU,
(void *)&yj_GPU, (void *)&weights_GPU,
&Nparticles};
findIndexKernelNodeParams.func = (void *)find_index_kernel;
findIndexKernelNodeParams.gridDim = dim3(num_blocks, 1, 1);
findIndexKernelNodeParams.blockDim = dim3(threads_per_block, 1, 1);
findIndexKernelNodeParams.sharedMemBytes = 0;
findIndexKernelNodeParams.kernelParams = (void **)findIndexKernelArgs;
findIndexKernelNodeParams.extra = NULL;
checkCudaErrors(cudaGraphAddKernelNode(&findIndexKernelNode, graph, NULL, 0, &findIndexKernelNodeParams));
// Add dependencies between each kernels
checkCudaErrors(cudaGraphAddDependencies(graph, &likelihoodKernelNode, &sumKernelNode, 1));
checkCudaErrors(cudaGraphAddDependencies(graph, &sumKernelNode, &normalizeWeightsKernelNode, 1));
checkCudaErrors(cudaGraphAddDependencies(graph, &normalizeWeightsKernelNode, &findIndexKernelNode, 1));
// init the graph
cudaGraphExec_t graphExec;
checkCudaErrors(cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
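// Note: the argument values referenced by kernelParams (including the frame index k) are copied when
// each kernel node is added to the graph, so the same captured k is reused by every launch of graphExec
// below; updating it per frame would require cudaGraphExecKernelNodeSetParams on the instantiated graph.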
double wall1 = get_wall_time();
for (k = 1; k < Nfr; k++) {
checkCudaErrors(cudaEventRecord(start, 0));
checkCudaErrors(cudaGraphLaunch(graphExec, streamForGraph));
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
kernelTime += elapsedTime * 1.e-3;
}//end loop
//block till kernels are finished
checkCudaErrors(cudaStreamSynchronize(streamForGraph));
double wall2 = get_wall_time();
checkCudaErrors(cudaGraphExecDestroy(graphExec));
checkCudaErrors(cudaGraphDestroy(graph));
checkCudaErrors(cudaStreamDestroy(streamForGraph));
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
if(verbose && !quiet) {
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
}
char atts[1024];
sprintf(atts, "dimx:%d, dimy:%d, numframes:%d, numparticles:%d", IszX, IszY, Nfr, Nparticles);
resultDB.AddResult("particlefilter_float_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("particlefilter_float_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("particlefilter_float_total_time", atts, "sec", kernelTime+transferTime);
resultDB.AddResult("particlefilter_float_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
void addBenchmarkSpecOptions(OptionParser &op) {
op.addOption("dimx", OPT_INT, "0", "grid x dimension", 'x');
op.addOption("dimy", OPT_INT, "0", "grid y dimension", 'y');
op.addOption("framecount", OPT_INT, "0", "number of frames to track across", 'f');
op.addOption("np", OPT_INT, "0", "number of particles to use");
op.addOption("graph", OPT_BOOL, "0", "use CUDA Graph implementation");
}
void particlefilter_float(ResultDatabase &resultDB, int args[], bool useGraph);
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running ParticleFilter (float)\n");
int args[4];
args[0] = op.getOptionInt("dimx");
args[1] = op.getOptionInt("dimy");
args[2] = op.getOptionInt("framecount");
args[3] = op.getOptionInt("np");
bool preset = false;
verbose = op.getOptionBool("verbose");
quiet = op.getOptionBool("quiet");
bool useGraph = op.getOptionBool("graph");
for(int i = 0; i < 4; i++) {
if(args[i] <= 0) {
preset = true;
}
}
if(preset) {
int probSizes[4][4] = {{10, 10, 2, 100},
{40, 40, 5, 500},
{200, 200, 8, 500000},
{500, 500, 15, 1000000}};
int size = op.getOptionInt("size") - 1;
for(int i = 0; i < 4; i++) {
args[i] = probSizes[size][i];
}
}
if(!quiet) {
printf("Using dimx=%d, dimy=%d, framecount=%d, numparticles=%d\n",
args[0], args[1], args[2], args[3]);
}
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
if(!quiet) {
printf("Pass %d: ", i);
}
particlefilter_float(resultDB, args, useGraph);
if(!quiet) {
printf("Done.\n");
}
}
}
void particlefilter_float(ResultDatabase &resultDB, int args[], bool useGraph) {
int IszX, IszY, Nfr, Nparticles;
IszX = args[0];
IszY = args[1];
Nfr = args[2];
Nparticles = args[3];
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
//call particle filter
if (useGraph) particleFilterGraph(I, IszX, IszY, Nfr, seed, Nparticles, resultDB);
else particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, resultDB);
free(seed);
free(I);
}
|
7f8f7a168ecfde1c768065acdd9f6136c2fd38f8.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 7f8f7a168ecfde1c768065acdd9f6136c2fd38f8.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
1f6fcbd4c45b792e7996640214529cc6b6267c11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTable(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
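// LookupTableGrad scatters the output gradient rows back into the table gradient: for each id it
// atomically accumulates D values into row `id` of `table`, since several ids may select the same row.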
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
class LookupTableCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.InputNames("Ids").front();
auto out_name = context.OutputNames("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
auto *ids = ids_t->data<int64_t>();
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_HIP
dim3 threads(64, 4);
#else
dim3 threads(128, 8);
#endif // PADDLE_WITH_HIP
dim3 grids(8, 1);
#ifdef PADDLE_WITH_HIP
if (padding_idx == -1)
hipLaunchKernelGGL(( LookupTable<
T, 64, 4, 8,
false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
else
hipLaunchKernelGGL(( LookupTable<
T, 64, 4, 8,
true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
#else
if (padding_idx == -1)
hipLaunchKernelGGL(( LookupTable<
T, 128, 8, 8,
false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
else
hipLaunchKernelGGL(( LookupTable<
T, 128, 8, 8,
true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
#endif // PADDLE_WITH_HIP
}
};
template <typename T>
class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t *ids = ids_t->data<int64_t>();
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
#ifdef PADDLE_WITH_HIP
dim3 threads(64, 4);
#else
dim3 threads(128, 8);
#endif // PADDLE_WITH_HIP
dim3 grids(8, 1);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(( LookupTableGrad<T, 64, 4, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids, N, K, D);
#else
hipLaunchKernelGGL(( LookupTableGrad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids, N, K, D);
#endif // PADDLE_WITH_HIP
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>,
ops::LookupTableCUDAKernel<double>,
ops::LookupTableCUDAKernel<plat::float16>,
ops::LookupTableCUDAKernel<int8_t>,
ops::LookupTableCUDAKernel<int16_t>);
REGISTER_OP_CUDA_KERNEL(lookup_table_grad,
ops::LookupTableGradCUDAKernel<float>,
ops::LookupTableGradCUDAKernel<double>,
ops::LookupTableGradCUDAKernel<plat::float16>);
| 1f6fcbd4c45b792e7996640214529cc6b6267c11.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTable(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
class LookupTableCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.InputNames("Ids").front();
auto out_name = context.OutputNames("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
auto *ids = ids_t->data<int64_t>();
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_HIP
dim3 threads(64, 4);
#else
dim3 threads(128, 8);
#endif // PADDLE_WITH_HIP
dim3 grids(8, 1);
#ifdef PADDLE_WITH_HIP
if (padding_idx == -1)
LookupTable<
T, 64, 4, 8,
false><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
else
LookupTable<
T, 64, 4, 8,
true><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
#else
if (padding_idx == -1)
LookupTable<
T, 128, 8, 8,
false><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
else
LookupTable<
T, 128, 8, 8,
true><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
#endif // PADDLE_WITH_HIP
}
};
template <typename T>
class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t *ids = ids_t->data<int64_t>();
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
#ifdef PADDLE_WITH_HIP
dim3 threads(64, 4);
#else
dim3 threads(128, 8);
#endif // PADDLE_WITH_HIP
dim3 grids(8, 1);
#ifdef PADDLE_WITH_HIP
LookupTableGrad<T, 64, 4, 8><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
#else
LookupTableGrad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
#endif // PADDLE_WITH_HIP
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>,
ops::LookupTableCUDAKernel<double>,
ops::LookupTableCUDAKernel<plat::float16>,
ops::LookupTableCUDAKernel<int8_t>,
ops::LookupTableCUDAKernel<int16_t>);
REGISTER_OP_CUDA_KERNEL(lookup_table_grad,
ops::LookupTableGradCUDAKernel<float>,
ops::LookupTableGradCUDAKernel<double>,
ops::LookupTableGradCUDAKernel<plat::float16>);
|
31bed7258e154c37505a8684f051b3e0f5c43e9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "resizeAndMergeBase.hpp"
//#include <openpose/gpu/cuda.hpp>
//#include <openpose_private/gpu/cuda.hu>
#include "cudaStuff.hpp"
namespace op
{
const auto THREADS_PER_BLOCK = 256u;
const auto THREADS_PER_BLOCK_1D = 16u;
template <typename T>
__global__ void fillKernel(
T* targetPtr, const T* const sourcePtr, const int N)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
if (x < N)
targetPtr[x] = sourcePtr[x];
}
// template <typename T>
// __global__ void resizeKernelOld(
// T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
// const int heightTarget)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// if (x < widthTarget && y < heightTarget)
// {
// const T xSource = (x + T(0.5f)) * widthSource / T(widthTarget) - T(0.5f);
// const T ySource = (y + T(0.5f)) * heightSource / T(heightTarget) - T(0.5f);
// targetPtr[y*widthTarget+x] = bicubicInterpolate(
// sourcePtr, xSource, ySource, widthSource, heightSource, widthSource);
// }
// }
template <typename T>
__global__ void resizeKernel(
T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
const int heightTarget)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
const auto sourceArea = widthSource * heightSource;
const auto targetArea = widthTarget * heightTarget;
const T xSource = (x + T(0.5f)) * widthSource / T(widthTarget) - T(0.5f);
const T ySource = (y + T(0.5f)) * heightSource / T(heightTarget) - T(0.5f);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
}
}
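// resizeAndPadKernel writes the source rescaled by rescaleFactor into the top-left corner of the
// target (bicubic sampling) and zero-fills the remaining padded region.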
template <typename T>
__global__ void resizeAndPadKernel(
T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
const int heightTarget, const T rescaleFactor)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
const auto targetArea = widthTarget * heightTarget;
if (x < widthSource * rescaleFactor && y < heightSource * rescaleFactor)
{
const auto sourceArea = widthSource * heightSource;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
}
else
targetPtr[channel * targetArea + y*widthTarget+x] = 0;
}
}
template <typename T>
__global__ void resizeAndPadKernel(
T* targetPtr, const unsigned char* const sourcePtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const T rescaleFactor)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
const auto targetArea = widthTarget * heightTarget;
if (x < widthSource * rescaleFactor && y < heightSource * rescaleFactor)
{
const auto sourceArea = widthSource * heightSource;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const unsigned char* sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
}
else
targetPtr[channel * targetArea + y*widthTarget+x] = 0;
}
}
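// resize8TimesKernel upsamples by an integer factor (8 in practice): assuming the block dimensions
// match the rescale factor, each block covers a rescaleFactor x rescaleFactor output tile and caches
// the 5x5 source patch it needs in shared memory, so the bicubic support is read from global memory
// once per block; the first block row and column fall back to the plain per-pixel path above.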
template <typename T>
__global__ void resize8TimesKernel(
T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
const int heightTarget, const unsigned int rescaleFactor)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
// Normal resize
// Note: The first blockIdx of each dimension behaves differently, so applying old version in those
if (blockIdx.x < 1 || blockIdx.y < 1)
// Actually it is only required for the first 4, but then I would have not loaded the shared memory
// if ((blockIdx.x < 1 || blockIdx.y < 1) && (threadIdx.x < 4 || threadIdx.y < 4))
{
const auto sourceArea = widthSource * heightSource;
const auto targetArea = widthTarget * heightTarget;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
return;
}
// Load shared memory
// If resize >= 5, then #threads per block >= # elements of shared memory
const auto sharedSize = 25; // (4+1)^2
__shared__ T sourcePtrShared[sharedSize];
const auto sharedLoadId = threadIdx.x + rescaleFactor*threadIdx.y;
if (sharedLoadId < sharedSize)
{
// Idea: Find minimum possible x and y
const auto minTargetX = blockIdx.x * rescaleFactor;
const auto minSourceXFloat = (minTargetX + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const auto minSourceXInt = int(floor(minSourceXFloat)) - 1;
const auto minTargetY = blockIdx.y * rescaleFactor;
const auto minSourceYFloat = (minTargetY + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const auto minSourceYInt = int(floor(minSourceYFloat)) - 1;
// Get current x and y
const auto xClean = fastTruncateCuda(minSourceXInt+int(sharedLoadId%5), 0, widthSource - 1);
const auto yClean = fastTruncateCuda(minSourceYInt+int(sharedLoadId/5), 0, heightSource - 1);
// Load into shared memory
const auto sourceIndex = (channel * heightSource + yClean) * widthSource + xClean;
sourcePtrShared[sharedLoadId] = sourcePtr[sourceIndex];
}
__syncthreads();
// Apply resize
const auto targetArea = widthTarget * heightTarget;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate8Times(
sourcePtrShared, xSource, ySource, widthSource, heightSource, threadIdx.x, threadIdx.y);
}
}
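// resizeAndAddAndAverageKernel merges multi-scale outputs: for each target pixel it bicubically
// samples each of the `counter` source maps at that map's own scale, sums the samples, and divides
// by `counter` (up to 8 sources are passed as separate pointers).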
template <typename T>
__global__ void resizeAndAddAndAverageKernel(
T* targetPtr, const int counter, const T* const scaleWidths, const T* const scaleHeights,
const int* const widthSources, const int* const heightSources, const int widthTarget, const int heightTarget,
const T* const sourcePtr0, const T* const sourcePtr1, const T* const sourcePtr2, const T* const sourcePtr3,
const T* const sourcePtr4, const T* const sourcePtr5, const T* const sourcePtr6, const T* const sourcePtr7)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
// For each pixel
if (x < widthTarget && y < heightTarget)
{
// Local variable for higher speed
T interpolated = T(0.f);
// For each input source pointer
for (auto i = 0 ; i < counter ; ++i)
{
const auto sourceArea = widthSources[i] * heightSources[i];
const T xSource = (x + T(0.5f)) / scaleWidths[i] - T(0.5f);
const T ySource = (y + T(0.5f)) / scaleHeights[i] - T(0.5f);
const T* const sourcePtr = (
i == 0 ? sourcePtr0 : i == 1 ? sourcePtr1 : i == 2 ? sourcePtr2 : i == 3 ? sourcePtr3
: i == 4 ? sourcePtr4 : i == 5 ? sourcePtr5 : i == 6 ? sourcePtr6 : sourcePtr7);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
interpolated += bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSources[i], heightSources[i], widthSources[i]);
}
// Save into memory
const auto targetArea = widthTarget * heightTarget;
targetPtr[channel * targetArea + y*widthTarget+x] = interpolated / T(counter);
}
}
// template <typename T>
// __global__ void resizeAndAddKernel(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (x < widthTarget && y < heightTarget)
// {
// const auto sourceArea = widthSource * heightSource;
// const auto targetArea = widthTarget * heightTarget;
// const T xSource = (x + T(0.5f)) * widthSource / T(widthTarget) - T(0.5f);
// const T ySource = (y + T(0.5f)) * heightSource / T(heightTarget) - T(0.5f);
// const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
// targetPtr[channel * targetArea + y*widthTarget+x] += bicubicInterpolate(
// sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
// }
// }
// template <typename T>
// __global__ void resizeAndAverageKernel(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget, const int counter)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (x < widthTarget && y < heightTarget)
// {
// const auto sourceArea = widthSource * heightSource;
// const auto targetArea = widthTarget * heightTarget;
// const T xSource = (x + T(0.5f)) / scaleWidth - T(0.5f);
// const T ySource = (y + T(0.5f)) / scaleHeight - T(0.5f);
// const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
// const auto interpolated = bicubicInterpolate(
// sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
// auto& targetPixel = targetPtr[channel * targetArea + y*widthTarget+x];
// targetPixel = (targetPixel + interpolated) / T(counter);
// }
// }
// template <typename T>
// __global__ void resizeAndAddKernelOld(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// if (x < widthTarget && y < heightTarget)
// {
// const T xSource = (x + T(0.5f)) / scaleWidth - T(0.5f);
// const T ySource = (y + T(0.5f)) / scaleHeight - T(0.5f);
// targetPtr[y*widthTarget+x] += bicubicInterpolate(
// sourcePtr, xSource, ySource, widthSource, heightSource, widthSource);
// }
// }
// template <typename T>
// __global__ void resizeAndAverageKernelOld(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget, const int counter)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// if (x < widthTarget && y < heightTarget)
// {
// const T xSource = (x + T(0.5f)) / scaleWidth - T(0.5f);
// const T ySource = (y + T(0.5f)) / scaleHeight - T(0.5f);
// const auto interpolated = bicubicInterpolate(
// sourcePtr, xSource, ySource, widthSource, heightSource, widthSource);
// auto& targetPixel = targetPtr[y*widthTarget+x];
// targetPixel = (targetPixel + interpolated) / T(counter);
// }
// }
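    // resizeAndMergeGpu dispatch:
    //  - single source whose resolution already matches the target (ratio 1) -> plain copy via fillKernel;
    //  - single source at 1/8 of the target resolution                       -> resize8TimesKernel;
    //  - several sources (multi-scale)                                       -> resizeAndAddAndAverageKernel.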
template <typename T>
void resizeAndMergeGpu(
T* targetPtr, const std::vector<const T*>& sourcePtrs, const std::array<int, 4>& targetSize,
const std::vector<std::array<int, 4>>& sourceSizes, const std::vector<T>& scaleInputToNetInputs)
{
try
{
// Sanity checks
if (sourceSizes.empty())
printf("resizeAndMergeGpu: sourceSizes cannot be empty.\n");
if (sourcePtrs.size() != sourceSizes.size() || sourceSizes.size() != scaleInputToNetInputs.size())
printf("resizeAndMergeGpu: Size(sourcePtrs) must match size(sourceSizes) and size(scaleInputToNetInputs). Currently: "
"%d %d %d\n",
(int)sourcePtrs.size(),
(int)sourceSizes.size(),
(int)scaleInputToNetInputs.size());
// Parameters
const auto channels = targetSize[1];
const auto heightTarget = targetSize[2];
const auto widthTarget = targetSize[3];
// const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D};
// const dim3 numBlocks{
// getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
// getNumberCudaBlocks(heightTarget, threadsPerBlock.y)};
const auto& sourceSize = sourceSizes[0];
const auto heightSource = sourceSize[2];
const auto widthSource = sourceSize[3];
// No multi-scale merging or no merging required
if (sourceSizes.size() == 1)
{
const auto num = sourceSize[0];
if (targetSize[0] > 1 || num == 1)
{
// // Profiling code
// const auto REPS = 100;
// double timeNormalize1 = 0.;
// double timeNormalize2 = 0.;
// double timeNormalize3 = 0.;
// // Non-optimized function
// OP_CUDA_PROFILE_INIT(REPS);
// const auto sourceChannelOffset = heightSource * widthSource;
// const auto targetChannelOffset = widthTarget * heightTarget;
// for (auto n = 0; n < num; n++)
// {
// const auto offsetBase = n*channels;
// for (auto c = 0 ; c < channels ; c++)
// {
// const auto offset = offsetBase + c;
// resizeKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr + offset * targetChannelOffset,
// sourcePtrs.at(0) + offset * sourceChannelOffset,
// widthSource, heightSource, widthTarget, heightTarget);
// }
// }
// OP_CUDA_PROFILE_END(timeNormalize1, 1e3, REPS);
// // Optimized function for any resize size (suboptimal for 8x resize)
// OP_CUDA_PROFILE_INIT(REPS);
// const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
// const dim3 numBlocks{
// getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
// getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
// getNumberCudaBlocks(num * channels, threadsPerBlock.z)};
// resizeKernel<<<numBlocks, threadsPerBlock>>>(
// targetPtr, sourcePtrs.at(0), widthSource, heightSource, widthTarget, heightTarget);
// OP_CUDA_PROFILE_END(timeNormalize2, 1e3, REPS);
// Optimized function for 8x resize
// OP_CUDA_PROFILE_INIT(REPS);
if (widthTarget / widthSource == 1 && heightTarget / heightSource == 1)
{
const auto N = widthTarget * heightTarget * num * channels;
const dim3 threadsPerBlock{THREADS_PER_BLOCK};
const dim3 numBlocks{getNumberCudaBlocks(N, threadsPerBlock.x)};
hipLaunchKernelGGL(( fillKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
targetPtr, sourcePtrs.at(0), N);
}
else
{
if (widthTarget / widthSource != 8 || heightTarget / heightSource != 8)
printf("Kernel only implemented for 8x resize. Notify us if this error appears.\n");
const auto rescaleFactor = (unsigned int) ::ceil(heightTarget / (float)(heightSource));
const dim3 threadsPerBlock{rescaleFactor, rescaleFactor, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(num * channels, threadsPerBlock.z)};
hipLaunchKernelGGL(( resize8TimesKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
targetPtr, sourcePtrs.at(0), widthSource, heightSource, widthTarget, heightTarget,
rescaleFactor);
}
// OP_CUDA_PROFILE_END(timeNormalize3, 1e3, REPS);
// // Profiling code
// opLog(" Res(ori)=" + std::to_string(timeNormalize1) + "ms");
// opLog(" Res(new)=" + std::to_string(timeNormalize2) + "ms");
// opLog(" Res(new8x)=" + std::to_string(timeNormalize3) + "ms");
}
// Old inefficient multi-scale merging
else
printf("It should never reache this point. Notify us otherwise.\n");
}
// Multi-scaling merging
else
{
const auto scaleToMainScaleWidth = widthTarget / T(widthSource);
const auto scaleToMainScaleHeight = heightTarget / T(heightSource);
// // Profiling code
// const auto REPS = 10;
// // const auto REPS = 100;
// double timeNormalize1 = 0.;
// double timeNormalize2 = 0.;
// double timeNormalize3 = 0.;
// // Non-optimized function
// OP_CUDA_PROFILE_INIT(REPS);
// const auto targetChannelOffset = widthTarget * heightTarget;
// hipMemset(targetPtr, 0, channels*targetChannelOffset * sizeof(T));
// for (auto i = 0u ; i < sourceSizes.size(); ++i)
// {
// const auto& currentSize = sourceSizes.at(i);
// const auto currentHeight = currentSize[2];
// const auto currentWidth = currentSize[3];
// const auto sourceChannelOffset = currentHeight * currentWidth;
// const auto scaleInputToNet = scaleInputToNetInputs[i] / scaleInputToNetInputs[0];
// const auto scaleWidth = scaleToMainScaleWidth / scaleInputToNet;
// const auto scaleHeight = scaleToMainScaleHeight / scaleInputToNet;
// // All but last image --> add
// if (i < sourceSizes.size() - 1)
// {
// for (auto c = 0 ; c < channels ; c++)
// {
// resizeAndAddKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr + c * targetChannelOffset, sourcePtrs[i] + c * sourceChannelOffset,
// scaleWidth, scaleHeight, currentWidth, currentHeight, widthTarget,
// heightTarget);
// }
// }
// // Last image --> average all
// else
// {
// for (auto c = 0 ; c < channels ; c++)
// {
// resizeAndAverageKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr + c * targetChannelOffset, sourcePtrs[i] + c * sourceChannelOffset,
// scaleWidth, scaleHeight, currentWidth, currentHeight, widthTarget,
// heightTarget, (int)sourceSizes.size());
// }
// }
// }
// OP_CUDA_PROFILE_END(timeNormalize1, 1e3, REPS);
// // Optimized function for any resize size (suboptimal for 8x resize)
// OP_CUDA_PROFILE_INIT(REPS);
// const auto targetChannelOffset = widthTarget * heightTarget;
// hipMemset(targetPtr, 0, channels*targetChannelOffset * sizeof(T));
// const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
// const dim3 numBlocks{
// getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
// getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
// getNumberCudaBlocks(channels, threadsPerBlock.z)};
// for (auto i = 0u ; i < sourceSizes.size(); ++i)
// {
// const auto& currentSize = sourceSizes.at(i);
// const auto currentHeight = currentSize[2];
// const auto currentWidth = currentSize[3];
// const auto scaleInputToNet = scaleInputToNetInputs[i] / scaleInputToNetInputs[0];
// const auto scaleWidth = scaleToMainScaleWidth / scaleInputToNet;
// const auto scaleHeight = scaleToMainScaleHeight / scaleInputToNet;
// // All but last image --> add
// if (i < sourceSizes.size() - 1)
// resizeAndAddKernel<<<numBlocks, threadsPerBlock>>>(
// targetPtr, sourcePtrs[i], scaleWidth, scaleHeight, currentWidth, currentHeight,
// widthTarget, heightTarget);
// // Last image --> average all
// else
// resizeAndAverageKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr, sourcePtrs[i], scaleWidth, scaleHeight, currentWidth, currentHeight,
// widthTarget, heightTarget, (int)sourceSizes.size());
// }
// OP_CUDA_PROFILE_END(timeNormalize2, 1e3, REPS);
// Super optimized function
// OP_CUDA_PROFILE_INIT(REPS);
if (sourcePtrs.size() > 8)
printf("More than 8 scales are not implemented (yet). Notify us to implement it.\n");
const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(channels, threadsPerBlock.z)};
// Fill auxiliary params
std::vector<int> widthSourcesCpu(sourceSizes.size());
std::vector<int> heightSourcesCpu(sourceSizes.size());
std::vector<T> scaleWidthsCpu(sourceSizes.size());
std::vector<T> scaleHeightsCpu(sourceSizes.size());
for (auto i = 0u ; i < sourceSizes.size(); ++i)
{
const auto& currentSize = sourceSizes.at(i);
heightSourcesCpu[i] = currentSize[2];
widthSourcesCpu[i] = currentSize[3];
const auto scaleInputToNet = scaleInputToNetInputs[i] / scaleInputToNetInputs[0];
scaleWidthsCpu[i] = scaleToMainScaleWidth / scaleInputToNet;
scaleHeightsCpu[i] = scaleToMainScaleHeight / scaleInputToNet;
}
// GPU params
int* widthSources;
hipMalloc((void**)&widthSources, sizeof(int) * sourceSizes.size());
hipMemcpy(
widthSources, widthSourcesCpu.data(), sizeof(int) * sourceSizes.size(),
hipMemcpyHostToDevice);
int* heightSources;
hipMalloc((void**)&heightSources, sizeof(int) * sourceSizes.size());
hipMemcpy(
heightSources, heightSourcesCpu.data(), sizeof(int) * sourceSizes.size(),
hipMemcpyHostToDevice);
T* scaleWidths;
hipMalloc((void**)&scaleWidths, sizeof(T) * sourceSizes.size());
hipMemcpy(
scaleWidths, scaleWidthsCpu.data(), sizeof(T) * sourceSizes.size(),
hipMemcpyHostToDevice);
T* scaleHeights;
hipMalloc((void**)&scaleHeights, sizeof(T) * sourceSizes.size());
hipMemcpy(
scaleHeights, scaleHeightsCpu.data(), sizeof(T) * sourceSizes.size(),
hipMemcpyHostToDevice);
// Resize each channel, add all, and get average
hipLaunchKernelGGL(( resizeAndAddAndAverageKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
targetPtr, (int)sourceSizes.size(), scaleWidths, scaleHeights, widthSources, heightSources,
widthTarget, heightTarget, sourcePtrs[0], sourcePtrs[1], sourcePtrs[2], sourcePtrs[3],
sourcePtrs[4], sourcePtrs[5], sourcePtrs[6], sourcePtrs[7]);
// Free memory
if (widthSources != nullptr)
hipFree(widthSources);
if (heightSources != nullptr)
hipFree(heightSources);
if (scaleWidths != nullptr)
hipFree(scaleWidths);
if (scaleHeights != nullptr)
hipFree(scaleHeights);
// OP_CUDA_PROFILE_END(timeNormalize3, 1e3, REPS);
// // Profiling code
// opLog(" Res(orig)=" + std::to_string(timeNormalize1) + "ms");
// opLog(" Res(new4)=" + std::to_string(timeNormalize2) + "ms");
// opLog(" Res(new1)=" + std::to_string(timeNormalize3) + "ms");
}
// cudaCheck(__LINE__, __FUNCTION__, __FILE__);
}
catch (const std::exception& e)
{
printf("%s\n", e.what());
}
}
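    // resizeAndPadRbgGpu: bicubically rescales a 3-channel image by scaleFactor into the top-left corner of
    // the target and zero-fills the remaining (padded) region; one thread per target pixel and channel.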
template <typename T>
void resizeAndPadRbgGpu(
T* targetPtr, const T* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const T scaleFactor)
{
try
{
const auto channels = 3;
const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(channels, threadsPerBlock.z)};
hipLaunchKernelGGL(( resizeAndPadKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
targetPtr, srcPtr, widthSource, heightSource, widthTarget, heightTarget, scaleFactor);
}
catch (const std::exception& e)
{
printf("%s\n", e.what());
}
}
template <typename T>
void resizeAndPadRbgGpu(
T* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const T scaleFactor)
{
try
{
const auto channels = 3;
const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(channels, threadsPerBlock.z)};
hipLaunchKernelGGL(( resizeAndPadKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
targetPtr, srcPtr, widthSource, heightSource, widthTarget, heightTarget, scaleFactor);
}
catch (const std::exception& e)
{
printf("%s\n", e.what());
}
}
template void resizeAndMergeGpu(
float* targetPtr, const std::vector<const float*>& sourcePtrs, const std::array<int, 4>& targetSize,
const std::vector<std::array<int, 4>>& sourceSizes, const std::vector<float>& scaleInputToNetInputs);
template void resizeAndMergeGpu(
double* targetPtr, const std::vector<const double*>& sourcePtrs, const std::array<int, 4>& targetSize,
const std::vector<std::array<int, 4>>& sourceSizes, const std::vector<double>& scaleInputToNetInputs);
template void resizeAndPadRbgGpu(
float* targetPtr, const float* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const float scaleFactor);
template void resizeAndPadRbgGpu(
double* targetPtr, const double* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const double scaleFactor);
template void resizeAndPadRbgGpu(
float* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const float scaleFactor);
template void resizeAndPadRbgGpu(
double* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const double scaleFactor);
}
| 31bed7258e154c37505a8684f051b3e0f5c43e9c.cu | #include "resizeAndMergeBase.hpp"
//#include <openpose/gpu/cuda.hpp>
//#include <openpose_private/gpu/cuda.hu>
#include "cudaStuff.hpp"
namespace op
{
const auto THREADS_PER_BLOCK = 256u;
const auto THREADS_PER_BLOCK_1D = 16u;
template <typename T>
__global__ void fillKernel(
T* targetPtr, const T* const sourcePtr, const int N)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
if (x < N)
targetPtr[x] = sourcePtr[x];
}
// template <typename T>
// __global__ void resizeKernelOld(
// T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
// const int heightTarget)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// if (x < widthTarget && y < heightTarget)
// {
// const T xSource = (x + T(0.5f)) * widthSource / T(widthTarget) - T(0.5f);
// const T ySource = (y + T(0.5f)) * heightSource / T(heightTarget) - T(0.5f);
// targetPtr[y*widthTarget+x] = bicubicInterpolate(
// sourcePtr, xSource, ySource, widthSource, heightSource, widthSource);
// }
// }
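    // resizeKernel: generic bicubic resize, one thread per target pixel and channel. Source coordinates use
    // the center-aligned mapping xSource = (x + 0.5) * widthSource / widthTarget - 0.5 (and likewise for y),
    // so source and target pixel centers line up.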
template <typename T>
__global__ void resizeKernel(
T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
const int heightTarget)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
const auto sourceArea = widthSource * heightSource;
const auto targetArea = widthTarget * heightTarget;
const T xSource = (x + T(0.5f)) * widthSource / T(widthTarget) - T(0.5f);
const T ySource = (y + T(0.5f)) * heightSource / T(heightTarget) - T(0.5f);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
}
}
template <typename T>
__global__ void resizeAndPadKernel(
T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
const int heightTarget, const T rescaleFactor)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
const auto targetArea = widthTarget * heightTarget;
if (x < widthSource * rescaleFactor && y < heightSource * rescaleFactor)
{
const auto sourceArea = widthSource * heightSource;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
}
else
targetPtr[channel * targetArea + y*widthTarget+x] = 0;
}
}
template <typename T>
__global__ void resizeAndPadKernel(
T* targetPtr, const unsigned char* const sourcePtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const T rescaleFactor)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
const auto targetArea = widthTarget * heightTarget;
if (x < widthSource * rescaleFactor && y < heightSource * rescaleFactor)
{
const auto sourceArea = widthSource * heightSource;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const unsigned char* sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
}
else
targetPtr[channel * targetArea + y*widthTarget+x] = 0;
}
}
template <typename T>
__global__ void resize8TimesKernel(
T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget,
const int heightTarget, const unsigned int rescaleFactor)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
if (x < widthTarget && y < heightTarget)
{
// Normal resize
            // Note: the first blockIdx of each dimension behaves differently, so the generic (old) version is applied in those blocks
if (blockIdx.x < 1 || blockIdx.y < 1)
            // Actually it is only required for the first 4 threads, but then the shared memory would not have been loaded
// if ((blockIdx.x < 1 || blockIdx.y < 1) && (threadIdx.x < 4 || threadIdx.y < 4))
{
const auto sourceArea = widthSource * heightSource;
const auto targetArea = widthTarget * heightTarget;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
return;
}
// Load shared memory
// If resize >= 5, then #threads per block >= # elements of shared memory
const auto sharedSize = 25; // (4+1)^2
__shared__ T sourcePtrShared[sharedSize];
const auto sharedLoadId = threadIdx.x + rescaleFactor*threadIdx.y;
if (sharedLoadId < sharedSize)
{
// Idea: Find minimum possible x and y
const auto minTargetX = blockIdx.x * rescaleFactor;
const auto minSourceXFloat = (minTargetX + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const auto minSourceXInt = int(floor(minSourceXFloat)) - 1;
const auto minTargetY = blockIdx.y * rescaleFactor;
const auto minSourceYFloat = (minTargetY + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const auto minSourceYInt = int(floor(minSourceYFloat)) - 1;
// Get current x and y
const auto xClean = fastTruncateCuda(minSourceXInt+int(sharedLoadId%5), 0, widthSource - 1);
const auto yClean = fastTruncateCuda(minSourceYInt+int(sharedLoadId/5), 0, heightSource - 1);
// Load into shared memory
const auto sourceIndex = (channel * heightSource + yClean) * widthSource + xClean;
sourcePtrShared[sharedLoadId] = sourcePtr[sourceIndex];
}
__syncthreads();
// Apply resize
const auto targetArea = widthTarget * heightTarget;
const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f);
const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f);
targetPtr[channel * targetArea + y*widthTarget+x] = bicubicInterpolate8Times(
sourcePtrShared, xSource, ySource, widthSource, heightSource, threadIdx.x, threadIdx.y);
}
}
template <typename T>
__global__ void resizeAndAddAndAverageKernel(
T* targetPtr, const int counter, const T* const scaleWidths, const T* const scaleHeights,
const int* const widthSources, const int* const heightSources, const int widthTarget, const int heightTarget,
const T* const sourcePtr0, const T* const sourcePtr1, const T* const sourcePtr2, const T* const sourcePtr3,
const T* const sourcePtr4, const T* const sourcePtr5, const T* const sourcePtr6, const T* const sourcePtr7)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
// For each pixel
if (x < widthTarget && y < heightTarget)
{
// Local variable for higher speed
T interpolated = T(0.f);
// For each input source pointer
for (auto i = 0 ; i < counter ; ++i)
{
const auto sourceArea = widthSources[i] * heightSources[i];
const T xSource = (x + T(0.5f)) / scaleWidths[i] - T(0.5f);
const T ySource = (y + T(0.5f)) / scaleHeights[i] - T(0.5f);
const T* const sourcePtr = (
i == 0 ? sourcePtr0 : i == 1 ? sourcePtr1 : i == 2 ? sourcePtr2 : i == 3 ? sourcePtr3
: i == 4 ? sourcePtr4 : i == 5 ? sourcePtr5 : i == 6 ? sourcePtr6 : sourcePtr7);
const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
interpolated += bicubicInterpolate(
sourcePtrChannel, xSource, ySource, widthSources[i], heightSources[i], widthSources[i]);
}
// Save into memory
const auto targetArea = widthTarget * heightTarget;
targetPtr[channel * targetArea + y*widthTarget+x] = interpolated / T(counter);
}
}
// template <typename T>
// __global__ void resizeAndAddKernel(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (x < widthTarget && y < heightTarget)
// {
// const auto sourceArea = widthSource * heightSource;
// const auto targetArea = widthTarget * heightTarget;
// const T xSource = (x + T(0.5f)) * widthSource / T(widthTarget) - T(0.5f);
// const T ySource = (y + T(0.5f)) * heightSource / T(heightTarget) - T(0.5f);
// const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
// targetPtr[channel * targetArea + y*widthTarget+x] += bicubicInterpolate(
// sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
// }
// }
// template <typename T>
// __global__ void resizeAndAverageKernel(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget, const int counter)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (x < widthTarget && y < heightTarget)
// {
// const auto sourceArea = widthSource * heightSource;
// const auto targetArea = widthTarget * heightTarget;
// const T xSource = (x + T(0.5f)) / scaleWidth - T(0.5f);
// const T ySource = (y + T(0.5f)) / scaleHeight - T(0.5f);
// const T* const sourcePtrChannel = sourcePtr + channel * sourceArea;
// const auto interpolated = bicubicInterpolate(
// sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource);
// auto& targetPixel = targetPtr[channel * targetArea + y*widthTarget+x];
// targetPixel = (targetPixel + interpolated) / T(counter);
// }
// }
// template <typename T>
// __global__ void resizeAndAddKernelOld(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// if (x < widthTarget && y < heightTarget)
// {
// const T xSource = (x + T(0.5f)) / scaleWidth - T(0.5f);
// const T ySource = (y + T(0.5f)) / scaleHeight - T(0.5f);
// targetPtr[y*widthTarget+x] += bicubicInterpolate(
// sourcePtr, xSource, ySource, widthSource, heightSource, widthSource);
// }
// }
// template <typename T>
// __global__ void resizeAndAverageKernelOld(
// T* targetPtr, const T* const sourcePtr, const T scaleWidth, const T scaleHeight, const int widthSource,
// const int heightSource, const int widthTarget, const int heightTarget, const int counter)
// {
// const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
// if (x < widthTarget && y < heightTarget)
// {
// const T xSource = (x + T(0.5f)) / scaleWidth - T(0.5f);
// const T ySource = (y + T(0.5f)) / scaleHeight - T(0.5f);
// const auto interpolated = bicubicInterpolate(
// sourcePtr, xSource, ySource, widthSource, heightSource, widthSource);
// auto& targetPixel = targetPtr[y*widthTarget+x];
// targetPixel = (targetPixel + interpolated) / T(counter);
// }
// }
template <typename T>
void resizeAndMergeGpu(
T* targetPtr, const std::vector<const T*>& sourcePtrs, const std::array<int, 4>& targetSize,
const std::vector<std::array<int, 4>>& sourceSizes, const std::vector<T>& scaleInputToNetInputs)
{
try
{
// Sanity checks
if (sourceSizes.empty())
printf("resizeAndMergeGpu: sourceSizes cannot be empty.\n");
if (sourcePtrs.size() != sourceSizes.size() || sourceSizes.size() != scaleInputToNetInputs.size())
printf("resizeAndMergeGpu: Size(sourcePtrs) must match size(sourceSizes) and size(scaleInputToNetInputs). Currently: "
"%d %d %d\n",
(int)sourcePtrs.size(),
(int)sourceSizes.size(),
(int)scaleInputToNetInputs.size());
// Parameters
const auto channels = targetSize[1];
const auto heightTarget = targetSize[2];
const auto widthTarget = targetSize[3];
// const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D};
// const dim3 numBlocks{
// getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
// getNumberCudaBlocks(heightTarget, threadsPerBlock.y)};
const auto& sourceSize = sourceSizes[0];
const auto heightSource = sourceSize[2];
const auto widthSource = sourceSize[3];
// No multi-scale merging or no merging required
if (sourceSizes.size() == 1)
{
const auto num = sourceSize[0];
if (targetSize[0] > 1 || num == 1)
{
// // Profiling code
// const auto REPS = 100;
// double timeNormalize1 = 0.;
// double timeNormalize2 = 0.;
// double timeNormalize3 = 0.;
// // Non-optimized function
// OP_CUDA_PROFILE_INIT(REPS);
// const auto sourceChannelOffset = heightSource * widthSource;
// const auto targetChannelOffset = widthTarget * heightTarget;
// for (auto n = 0; n < num; n++)
// {
// const auto offsetBase = n*channels;
// for (auto c = 0 ; c < channels ; c++)
// {
// const auto offset = offsetBase + c;
// resizeKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr + offset * targetChannelOffset,
// sourcePtrs.at(0) + offset * sourceChannelOffset,
// widthSource, heightSource, widthTarget, heightTarget);
// }
// }
// OP_CUDA_PROFILE_END(timeNormalize1, 1e3, REPS);
// // Optimized function for any resize size (suboptimal for 8x resize)
// OP_CUDA_PROFILE_INIT(REPS);
// const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
// const dim3 numBlocks{
// getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
// getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
// getNumberCudaBlocks(num * channels, threadsPerBlock.z)};
// resizeKernel<<<numBlocks, threadsPerBlock>>>(
// targetPtr, sourcePtrs.at(0), widthSource, heightSource, widthTarget, heightTarget);
// OP_CUDA_PROFILE_END(timeNormalize2, 1e3, REPS);
// Optimized function for 8x resize
// OP_CUDA_PROFILE_INIT(REPS);
if (widthTarget / widthSource == 1 && heightTarget / heightSource == 1)
{
const auto N = widthTarget * heightTarget * num * channels;
const dim3 threadsPerBlock{THREADS_PER_BLOCK};
const dim3 numBlocks{getNumberCudaBlocks(N, threadsPerBlock.x)};
fillKernel<<<numBlocks, threadsPerBlock>>>(
targetPtr, sourcePtrs.at(0), N);
}
else
{
if (widthTarget / widthSource != 8 || heightTarget / heightSource != 8)
printf("Kernel only implemented for 8x resize. Notify us if this error appears.\n");
const auto rescaleFactor = (unsigned int) std::ceil(heightTarget / (float)(heightSource));
const dim3 threadsPerBlock{rescaleFactor, rescaleFactor, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(num * channels, threadsPerBlock.z)};
resize8TimesKernel<<<numBlocks, threadsPerBlock>>>(
targetPtr, sourcePtrs.at(0), widthSource, heightSource, widthTarget, heightTarget,
rescaleFactor);
}
// OP_CUDA_PROFILE_END(timeNormalize3, 1e3, REPS);
// // Profiling code
// opLog(" Res(ori)=" + std::to_string(timeNormalize1) + "ms");
// opLog(" Res(new)=" + std::to_string(timeNormalize2) + "ms");
// opLog(" Res(new8x)=" + std::to_string(timeNormalize3) + "ms");
}
// Old inefficient multi-scale merging
else
printf("It should never reache this point. Notify us otherwise.\n");
}
// Multi-scaling merging
else
{
const auto scaleToMainScaleWidth = widthTarget / T(widthSource);
const auto scaleToMainScaleHeight = heightTarget / T(heightSource);
// // Profiling code
// const auto REPS = 10;
// // const auto REPS = 100;
// double timeNormalize1 = 0.;
// double timeNormalize2 = 0.;
// double timeNormalize3 = 0.;
// // Non-optimized function
// OP_CUDA_PROFILE_INIT(REPS);
// const auto targetChannelOffset = widthTarget * heightTarget;
// cudaMemset(targetPtr, 0, channels*targetChannelOffset * sizeof(T));
// for (auto i = 0u ; i < sourceSizes.size(); ++i)
// {
// const auto& currentSize = sourceSizes.at(i);
// const auto currentHeight = currentSize[2];
// const auto currentWidth = currentSize[3];
// const auto sourceChannelOffset = currentHeight * currentWidth;
// const auto scaleInputToNet = scaleInputToNetInputs[i] / scaleInputToNetInputs[0];
// const auto scaleWidth = scaleToMainScaleWidth / scaleInputToNet;
// const auto scaleHeight = scaleToMainScaleHeight / scaleInputToNet;
// // All but last image --> add
// if (i < sourceSizes.size() - 1)
// {
// for (auto c = 0 ; c < channels ; c++)
// {
// resizeAndAddKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr + c * targetChannelOffset, sourcePtrs[i] + c * sourceChannelOffset,
// scaleWidth, scaleHeight, currentWidth, currentHeight, widthTarget,
// heightTarget);
// }
// }
// // Last image --> average all
// else
// {
// for (auto c = 0 ; c < channels ; c++)
// {
// resizeAndAverageKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr + c * targetChannelOffset, sourcePtrs[i] + c * sourceChannelOffset,
// scaleWidth, scaleHeight, currentWidth, currentHeight, widthTarget,
// heightTarget, (int)sourceSizes.size());
// }
// }
// }
// OP_CUDA_PROFILE_END(timeNormalize1, 1e3, REPS);
// // Optimized function for any resize size (suboptimal for 8x resize)
// OP_CUDA_PROFILE_INIT(REPS);
// const auto targetChannelOffset = widthTarget * heightTarget;
// cudaMemset(targetPtr, 0, channels*targetChannelOffset * sizeof(T));
// const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
// const dim3 numBlocks{
// getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
// getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
// getNumberCudaBlocks(channels, threadsPerBlock.z)};
// for (auto i = 0u ; i < sourceSizes.size(); ++i)
// {
// const auto& currentSize = sourceSizes.at(i);
// const auto currentHeight = currentSize[2];
// const auto currentWidth = currentSize[3];
// const auto scaleInputToNet = scaleInputToNetInputs[i] / scaleInputToNetInputs[0];
// const auto scaleWidth = scaleToMainScaleWidth / scaleInputToNet;
// const auto scaleHeight = scaleToMainScaleHeight / scaleInputToNet;
// // All but last image --> add
// if (i < sourceSizes.size() - 1)
// resizeAndAddKernel<<<numBlocks, threadsPerBlock>>>(
// targetPtr, sourcePtrs[i], scaleWidth, scaleHeight, currentWidth, currentHeight,
// widthTarget, heightTarget);
// // Last image --> average all
// else
// resizeAndAverageKernelOld<<<numBlocks, threadsPerBlock>>>(
// targetPtr, sourcePtrs[i], scaleWidth, scaleHeight, currentWidth, currentHeight,
// widthTarget, heightTarget, (int)sourceSizes.size());
// }
// OP_CUDA_PROFILE_END(timeNormalize2, 1e3, REPS);
// Super optimized function
// OP_CUDA_PROFILE_INIT(REPS);
if (sourcePtrs.size() > 8)
printf("More than 8 scales are not implemented (yet). Notify us to implement it.\n");
const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(channels, threadsPerBlock.z)};
// Fill auxiliary params
std::vector<int> widthSourcesCpu(sourceSizes.size());
std::vector<int> heightSourcesCpu(sourceSizes.size());
std::vector<T> scaleWidthsCpu(sourceSizes.size());
std::vector<T> scaleHeightsCpu(sourceSizes.size());
for (auto i = 0u ; i < sourceSizes.size(); ++i)
{
const auto& currentSize = sourceSizes.at(i);
heightSourcesCpu[i] = currentSize[2];
widthSourcesCpu[i] = currentSize[3];
const auto scaleInputToNet = scaleInputToNetInputs[i] / scaleInputToNetInputs[0];
scaleWidthsCpu[i] = scaleToMainScaleWidth / scaleInputToNet;
scaleHeightsCpu[i] = scaleToMainScaleHeight / scaleInputToNet;
}
// GPU params
int* widthSources;
cudaMalloc((void**)&widthSources, sizeof(int) * sourceSizes.size());
cudaMemcpy(
widthSources, widthSourcesCpu.data(), sizeof(int) * sourceSizes.size(),
cudaMemcpyHostToDevice);
int* heightSources;
cudaMalloc((void**)&heightSources, sizeof(int) * sourceSizes.size());
cudaMemcpy(
heightSources, heightSourcesCpu.data(), sizeof(int) * sourceSizes.size(),
cudaMemcpyHostToDevice);
T* scaleWidths;
cudaMalloc((void**)&scaleWidths, sizeof(T) * sourceSizes.size());
cudaMemcpy(
scaleWidths, scaleWidthsCpu.data(), sizeof(T) * sourceSizes.size(),
cudaMemcpyHostToDevice);
T* scaleHeights;
cudaMalloc((void**)&scaleHeights, sizeof(T) * sourceSizes.size());
cudaMemcpy(
scaleHeights, scaleHeightsCpu.data(), sizeof(T) * sourceSizes.size(),
cudaMemcpyHostToDevice);
// Resize each channel, add all, and get average
resizeAndAddAndAverageKernel<<<numBlocks, threadsPerBlock>>>(
targetPtr, (int)sourceSizes.size(), scaleWidths, scaleHeights, widthSources, heightSources,
widthTarget, heightTarget, sourcePtrs[0], sourcePtrs[1], sourcePtrs[2], sourcePtrs[3],
sourcePtrs[4], sourcePtrs[5], sourcePtrs[6], sourcePtrs[7]);
// Free memory
if (widthSources != nullptr)
cudaFree(widthSources);
if (heightSources != nullptr)
cudaFree(heightSources);
if (scaleWidths != nullptr)
cudaFree(scaleWidths);
if (scaleHeights != nullptr)
cudaFree(scaleHeights);
// OP_CUDA_PROFILE_END(timeNormalize3, 1e3, REPS);
// // Profiling code
// opLog(" Res(orig)=" + std::to_string(timeNormalize1) + "ms");
// opLog(" Res(new4)=" + std::to_string(timeNormalize2) + "ms");
// opLog(" Res(new1)=" + std::to_string(timeNormalize3) + "ms");
}
// cudaCheck(__LINE__, __FUNCTION__, __FILE__);
}
catch (const std::exception& e)
{
printf("%s\n", e.what());
}
}
template <typename T>
void resizeAndPadRbgGpu(
T* targetPtr, const T* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const T scaleFactor)
{
try
{
const auto channels = 3;
const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(channels, threadsPerBlock.z)};
resizeAndPadKernel<<<numBlocks, threadsPerBlock>>>(
targetPtr, srcPtr, widthSource, heightSource, widthTarget, heightTarget, scaleFactor);
}
catch (const std::exception& e)
{
printf("%s\n", e.what());
}
}
template <typename T>
void resizeAndPadRbgGpu(
T* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const T scaleFactor)
{
try
{
const auto channels = 3;
const dim3 threadsPerBlock{THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1};
const dim3 numBlocks{
getNumberCudaBlocks(widthTarget, threadsPerBlock.x),
getNumberCudaBlocks(heightTarget, threadsPerBlock.y),
getNumberCudaBlocks(channels, threadsPerBlock.z)};
resizeAndPadKernel<<<numBlocks, threadsPerBlock>>>(
targetPtr, srcPtr, widthSource, heightSource, widthTarget, heightTarget, scaleFactor);
}
catch (const std::exception& e)
{
printf("%s\n", e.what());
}
}
template void resizeAndMergeGpu(
float* targetPtr, const std::vector<const float*>& sourcePtrs, const std::array<int, 4>& targetSize,
const std::vector<std::array<int, 4>>& sourceSizes, const std::vector<float>& scaleInputToNetInputs);
template void resizeAndMergeGpu(
double* targetPtr, const std::vector<const double*>& sourcePtrs, const std::array<int, 4>& targetSize,
const std::vector<std::array<int, 4>>& sourceSizes, const std::vector<double>& scaleInputToNetInputs);
template void resizeAndPadRbgGpu(
float* targetPtr, const float* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const float scaleFactor);
template void resizeAndPadRbgGpu(
double* targetPtr, const double* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const double scaleFactor);
template void resizeAndPadRbgGpu(
float* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const float scaleFactor);
template void resizeAndPadRbgGpu(
double* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource,
const int widthTarget, const int heightTarget, const double scaleFactor);
}
|
9320a81f1564e1ef31a2b639ca86c7e01f7da3c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/transform_reduce.h>
#include "common.h"
#include "mgpcg.h"
namespace pcg
{
__host__ __device__ bool is_fluid(
CONST grid_cell<char>& f, int i, int j, int k, int nx, int ny, int nz) CONST
{
return i >= 0 && i < nx && j >= 0 && j < ny && k >= 0 && k < nz &&
LIQUID == f.get(i, j, k);
}
__host__ __device__ bool is_solid(
CONST grid_cell<char>& f, int i, int j, int k, int nx, int ny, int nz) CONST
{
return i < 0 || i >= nx || j < 0 || j >= ny || k < 0 || k >= nz ||
SOLID == f.get(i, j, k);
}
__host__ __device__ bool is_air(
CONST grid_cell<char>& f, int i, int j, int k, int nx, int ny, int nz) CONST
{
return i >= 0 && i < nx && j >= 0 && j < ny && k >= 0 && k < nz &&
AIR == f.get(i, j, k);
}
__host__ __device__ float neighbor_sum(CONST grid_cell<float>& Lxn,
CONST grid_cell<float>& Lxp,
CONST grid_cell<float>& Lyn,
CONST grid_cell<float>& Lyp,
CONST grid_cell<float>& Lzn,
CONST grid_cell<float>& Lzp,
CONST grid_cell<float>& x,
CONST grid_cell<char>& f,
int i,
int j,
int k,
int nx,
int ny,
int nz) CONST
{
int in = (i - 1 + nx) % nx;
int ip = (i + 1) % nx;
int jn = (j - 1 + ny) % ny;
int jp = (j + 1) % ny;
int kn = (k - 1 + nz) % nz;
int kp = (k + 1) % nz;
return x.get(in, j, k) * Lxn.get(i, j, k) +
x.get(ip, j, k) * Lxp.get(i, j, k) +
x.get(i, jn, k) * Lyn.get(i, j, k) +
x.get(i, jp, k) * Lyp.get(i, j, k) +
x.get(i, j, kn) * Lzn.get(i, j, k) +
x.get(i, j, kp) * Lzp.get(i, j, k);
}
__global__ void init_L(CONST grid_cell<char> f,
grid_cell<float> L_diag,
grid_cell<float> L_diag_inv,
grid_cell<float> Lxn,
grid_cell<float> Lxp,
grid_cell<float> Lyn,
grid_cell<float> Lyp,
grid_cell<float> Lzn,
grid_cell<float> Lzp,
int nx,
int ny,
int nz)
{
KERNAL_CONFIG
// int nx = f.get_nx();
// int ny = f.get_ny();
// int nz = f.get_nz();
if (i < nx && j < ny && k < nz)
{
if (LIQUID == f.get(i, j, k))
{
float s = 6.0f;
s -= float(is_solid(f, i - 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i + 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i, j - 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j + 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j, k - 1, nx, ny, nz));
s -= float(is_solid(f, i, j, k + 1, nx, ny, nz));
L_diag.get(i, j, k) = s;
L_diag_inv.get(i, j, k) = 1.0f / s;
}
Lxn.get(i, j, k) = float(is_fluid(f, i - 1, j, k, nx, ny, nz));
Lxp.get(i, j, k) = float(is_fluid(f, i + 1, j, k, nx, ny, nz));
Lyn.get(i, j, k) = float(is_fluid(f, i, j - 1, k, nx, ny, nz));
Lyp.get(i, j, k) = float(is_fluid(f, i, j + 1, k, nx, ny, nz));
Lzn.get(i, j, k) = float(is_fluid(f, i, j, k - 1, nx, ny, nz));
Lzp.get(i, j, k) = float(is_fluid(f, i, j, k + 1, nx, ny, nz));
}
}
__host__ __device__ float get_Ldiag(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
float s = 6.0f;
s -= float(is_solid(f, i - 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i + 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i, j - 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j + 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j, k - 1, nx, ny, nz));
s -= float(is_solid(f, i, j, k + 1, nx, ny, nz));
return s;
}
__host__ __device__ float get_Ldiaginv(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
float s = 6.0f;
s -= float(is_solid(f, i - 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i + 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i, j - 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j + 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j, k - 1, nx, ny, nz));
s -= float(is_solid(f, i, j, k + 1, nx, ny, nz));
return 1.0f / s;
}
__host__ __device__ float get_Lxn(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i - 1, j, k, nx, ny, nz));
}
__host__ __device__ float get_Lxp(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i + 1, j, k, nx, ny, nz));
}
__host__ __device__ float get_Lyn(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j - 1, k, nx, ny, nz));
}
__host__ __device__ float get_Lyp(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j + 1, k, nx, ny, nz));
}
__host__ __device__ float get_Lzn(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j, k - 1, nx, ny, nz));
}
__host__ __device__ float get_Lzp(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j, k + 1, nx, ny, nz));
}
__host__ __device__ float neighbor_sum(CONST grid_cell<float>& x,
CONST grid_cell<char>& f,
int i,
int j,
int k,
int nx,
int ny,
int nz) CONST
{
int in = (i - 1 + nx) % nx;
int ip = (i + 1) % nx;
int jn = (j - 1 + ny) % ny;
int jp = (j + 1) % ny;
int kn = (k - 1 + nz) % nz;
int kp = (k + 1) % nz;
return x.get(in, j, k) * get_Lxn(f, i, j, k, nx, ny, nz) +
x.get(ip, j, k) * get_Lxp(f, i, j, k, nx, ny, nz) +
x.get(i, jn, k) * get_Lyn(f, i, j, k, nx, ny, nz) +
x.get(i, jp, k) * get_Lyp(f, i, j, k, nx, ny, nz) +
x.get(i, j, kn) * get_Lzn(f, i, j, k, nx, ny, nz) +
x.get(i, j, kp) * get_Lzp(f, i, j, k, nx, ny, nz);
}
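// 7-point Poisson stencil, applied matrix-free: the diagonal is 6 minus the number of SOLID neighbors
// (get_Ldiag) and every LIQUID neighbor contributes an off-diagonal -1. neighbor_sum returns the positive
// sum of the fluid-neighbor values (the %-wrapped indices are harmless: a neighbor outside the grid has
// coefficient 0). The smooth kernel below is a red-black Gauss-Seidel sweep: only cells whose parity
// (i + j + k) % 2 equals `phase` are updated, so the parallel update is race-free:
//     z(i,j,k) = ( r(i,j,k) + sum_{fluid neighbors} z ) / L_diag(i,j,k)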
__global__ void smooth(grid_cell<float> z,
grid_cell<float> r,
grid_cell<char> f,
CONST int nx,
CONST int ny,
CONST int nz,
int phase)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && (i + j + k) % 2 == phase &&
LIQUID == f.get(i, j, k))
{
float rhs = r.get(i, j, k);
rhs += neighbor_sum(z, f, i, j, k, nx, ny, nz);
z.get(i, j, k) = rhs / get_Ldiag(f, i, j, k, nx, ny, nz);
}
}
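// regularize: zero every non-LIQUID cell so that whole-grid BLAS reductions (e.g. the dot products in
// MGPCGSolver::calc_dot) only see the fluid degrees of freedom.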
__global__ void regularize(
grid_cell<float> data, grid_cell<char> f, int nx, int ny, int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID != f.get(i, j, k))
{
data.get(i, j, k) = 0.0f;
}
}
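// Two-pass CSR assembly of an explicit copy of the Poisson matrix: formPoisson_count_nonzero writes each
// fluid row's nonzero count into `count`; the row-offset array I is expected to hold the exclusive prefix
// sum of `count` (presumably via the thrust scan header included above); formPoisson_build_matrix then
// fills the column indices J and values val row by row.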
__global__ void formPoisson_count_nonzero(CONST grid_cell<char> fluid_flag,
CONST grid_cell<int> cell_index,
int* count)
{
KERNAL_CONFIG
// build consistent ordering
int nx = fluid_flag.get_nx();
int ny = fluid_flag.get_ny();
int nz = fluid_flag.get_nz();
if (i < nx && j < ny && k < nz)
{
if (LIQUID == fluid_flag.get(i, j, k))
{
int cid = cell_index.get(i, j, k);
int c = 1;
c += is_fluid(fluid_flag, i, j, k - 1, nx, ny, nz);
c += is_fluid(fluid_flag, i, j - 1, k, nx, ny, nz);
c += is_fluid(fluid_flag, i - 1, j, k, nx, ny, nz);
c += is_fluid(fluid_flag, i + 1, j, k, nx, ny, nz);
c += is_fluid(fluid_flag, i, j + 1, k, nx, ny, nz);
c += is_fluid(fluid_flag, i, j, k + 1, nx, ny, nz);
count[cid] = c;
}
}
}
__global__ void formPoisson_build_matrix(CONST grid_cell<char> fluid_flag,
CONST grid_cell<int> cell_index,
CONST int* I,
int* J,
float* val)
{
KERNAL_CONFIG
// build consistent ordering
int nx = fluid_flag.get_nx();
int ny = fluid_flag.get_ny();
int nz = fluid_flag.get_nz();
if (i < nx && j < ny && k < nz)
{
if (LIQUID == fluid_flag.get(i, j, k))
{
int cid = cell_index.get(i, j, k);
int NZ = I[cid];
if (is_fluid(fluid_flag, i, j, k - 1, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j, k - 1);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i, j - 1, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j - 1, k);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i - 1, j, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i - 1, j, k);
val[NZ] = -1.0f;
NZ++;
}
{
J[NZ] = cell_index.get(i, j, k);
val[NZ] = get_Ldiag(fluid_flag, i, j, k, nx, ny, nz);
NZ++;
}
if (is_fluid(fluid_flag, i + 1, j, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i + 1, j, k);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i, j + 1, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j + 1, k);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i, j, k + 1, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j, k + 1);
val[NZ] = -1.0f;
NZ++;
}
}
}
}
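// downsample_f: coarsens the cell flags for the next multigrid level. A coarse cell is AIR if any of its
// eight fine children is AIR, otherwise LIQUID if any child is LIQUID, otherwise SOLID.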
__global__ void downsample_f(
grid_cell<char> f_fine, grid_cell<char> f_coarse, int nx, int ny, int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz)
{
int i2 = i * 2;
int j2 = j * 2;
int k2 = k * 2;
if (AIR == f_fine.get(i2, j2, k2) || //
AIR == f_fine.get(i2 + 1, j2, k2) || //
AIR == f_fine.get(i2, j2 + 1, k2) || //
AIR == f_fine.get(i2 + 1, j2 + 1, k2) || //
AIR == f_fine.get(i2, j2, k2 + 1) || //
AIR == f_fine.get(i2 + 1, j2, k2 + 1) || //
AIR == f_fine.get(i2, j2 + 1, k2 + 1) || //
AIR == f_fine.get(i2 + 1, j2 + 1, k2 + 1))
{
f_coarse.get(i, j, k) = AIR;
}
else if (LIQUID == f_fine.get(i2, j2, k2) || //
LIQUID == f_fine.get(i2 + 1, j2, k2) || //
LIQUID == f_fine.get(i2, j2 + 1, k2) || //
LIQUID == f_fine.get(i2 + 1, j2 + 1, k2) || //
LIQUID == f_fine.get(i2, j2, k2 + 1) || //
LIQUID == f_fine.get(i2 + 1, j2, k2 + 1) || //
LIQUID == f_fine.get(i2, j2 + 1, k2 + 1) || //
LIQUID == f_fine.get(i2 + 1, j2 + 1, k2 + 1))
{
f_coarse.get(i, j, k) = LIQUID;
}
else
{
f_coarse.get(i, j, k) = SOLID;
}
}
}
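// restrict_: computes the fine-level residual r - A z matrix-free and accumulates it into the parent coarse
// cell with weight 0.5 (atomicAdd, since eight fine cells share one parent). prolongate (below) adds the
// coarse-level correction back to every fine child, i.e. piecewise-constant interpolation.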
__global__ void restrict_(grid_cell<float> r_fine,
grid_cell<char> f_fine,
grid_cell<float> z_fine,
grid_cell<float> r_coarse,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f_fine.get(i, j, k))
{
float Az = get_Ldiag(f_fine, i, j, k, nx, ny, nz) * z_fine.get(i, j, k);
Az -= neighbor_sum(z_fine, f_fine, i, j, k, nx, ny, nz);
float res = r_fine.get(i, j, k) - Az;
atomicAdd(&r_coarse.get(i / 2, j / 2, k / 2), res * 0.5f);
}
}
__global__ void prolongate(grid_cell<float> z_fine,
grid_cell<float> z_coarse,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz)
{
z_fine.get(i, j, k) += z_coarse.get(i / 2, j / 2, k / 2);
}
}
__global__ void calc_Ap_kernel(grid_cell<float> Ap,
grid_cell<float> p,
grid_cell<char> f,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f.get(i, j, k))
{
float _Ap = get_Ldiag(f, i, j, k, nx, ny, nz) * p.get(i, j, k);
_Ap -= neighbor_sum(p, f, i, j, k, nx, ny, nz);
Ap.get(i, j, k) = _Ap;
}
}
__global__ void calc_saxpy_kernel(grid_cell<float> x,
grid_cell<float> y,
grid_cell<char> f,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f.get(i, j, k))
{
y.get(i, j, k) += a * x.get(i, j, k);
}
}
__global__ void calc_sxpay_kernel(grid_cell<float> x,
grid_cell<float> y,
grid_cell<char> f,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f.get(i, j, k))
{
y.get(i, j, k) = x.get(i, j, k) + a * y.get(i, j, k);
}
}
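// MGPCGSolver: conjugate gradient on the pressure Poisson system, preconditioned by a geometric multigrid
// V-cycle with red-black Gauss-Seidel smoothing (apply_preconditioner). Level 0 of __r/__z/__f aliases the
// caller's buffers via prepare_preconditioner, while the coarser levels own their storage; cuBLAS/cuSPARSE
// handles are kept for the dot products and sparse-matrix utilities.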
class MGPCGSolver
{
protected:
int nx, ny, nz;
int max_iters;
int n_mg_levels;
int n_pre_and_pose_smoothing;
int n_bottom_smoothing;
bool use_precon;
std::vector<grid_cell<float>> __r;
std::vector<grid_cell<float>> __z;
std::vector<grid_cell<char>> __f;
hipblasHandle_t cublasHandle = 0;
hipsparseHandle_t cusparseHandle = 0;
public:
MGPCGSolver(int nx_, int ny_, int nz_) : nx(nx_), ny(ny_), nz(nz_)
{
max_iters = 100;
// n_mg_levels = 4;
        n_mg_levels = 5;  // setting this to 1 reduces the preconditioner to plain red-black Gauss-Seidel (RBGS)
// for low res grid
// n_pre_and_pose_smoothing = 2;
// n_bottom_smoothing = 10;
// for high res grid
n_pre_and_pose_smoothing = 4;
n_bottom_smoothing = 30;
use_precon = true;
auto get_res = [this](int level) {
return make_int3(
nx / (1 << level), ny / (1 << level), nz / (1 << level));
};
__r.resize(n_mg_levels);
__z.resize(n_mg_levels);
__f.resize(n_mg_levels);
// no level 0
for (int l = 1; l < n_mg_levels; l++)
{
auto res = get_res(l);
__r[l].init_gpu(res.x, res.y, res.z);
__z[l].init_gpu(res.x, res.y, res.z);
__f[l].init_gpu(res.x, res.y, res.z);
}
checkCudaErrors(hipblasCreate(&cublasHandle));
checkCudaErrors(hipsparseCreate(&cusparseHandle));
}
~MGPCGSolver()
{
// no level 0
for (int l = 1; l < n_mg_levels; l++)
{
__r[l].free_gpu();
__z[l].free_gpu();
__f[l].free_gpu();
}
hipblasDestroy(cublasHandle);
hipsparseDestroy(cusparseHandle);
}
void prepare_preconditioner(grid_cell<float>& r0_buffer,
grid_cell<float>& z0_buffer,
grid_cell<char>& f0_buffer)
{
//__r[0].init_ref_gpu(nx, ny, nz, r0_buffer.get_ptr());
//__z[0].init_ref_gpu(nx, ny, nz, z0_buffer.get_ptr());
//__f[0].init_ref_gpu(nx, ny, nz, f0_buffer.get_ptr());
__r[0] = r0_buffer.cast<grid_cell<float>>(nx, ny, nz);
__z[0] = z0_buffer.cast<grid_cell<float>>(nx, ny, nz);
__f[0] = f0_buffer.cast<grid_cell<char>>(nx, ny, nz);
}
void apply_preconditioner()
{
dim3 block(8, 8, 8);
__z[0].clear_gpu();
// pre smoothing
for (int level = 0; level < n_mg_levels - 1; level++)
{
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
for (int i = 0; i < n_pre_and_pose_smoothing; i++)
{
for (int phase = 0; phase < 2; phase++)
hipLaunchKernelGGL(( smooth), dim3(grid), dim3(block), 0, 0, __z[level],
__r[level],
__f[level],
dim_x,
dim_y,
dim_z,
phase);
}
__z[level + 1].clear_gpu();
__r[level + 1].clear_gpu();
hipLaunchKernelGGL(( restrict_), dim3(grid), dim3(block), 0, 0, __r[level],
__f[level],
__z[level],
__r[level + 1],
dim_x,
dim_y,
dim_z);
}
// bottom smoothing
{
int halfcount = n_bottom_smoothing / 2;
int level = n_mg_levels - 1;
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
for (int order = 0; order < 2; order++)
{
for (int i = 0; i < halfcount; i++)
{
for (int phase = 0; phase < 2; phase++)
hipLaunchKernelGGL(( smooth), dim3(grid), dim3(block), 0, 0, __z[level],
__r[level],
__f[level],
dim_x,
dim_y,
dim_z,
(phase + order) % 2);
}
}
}
// post smoothing
for (int level = n_mg_levels - 2; level >= 0; level--)
{
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
hipLaunchKernelGGL(( prolongate), dim3(grid), dim3(block), 0, 0,
__z[level], __z[level + 1], dim_x, dim_y, dim_z);
for (int i = 0; i < n_pre_and_pose_smoothing; i++)
{
for (int phase = 0; phase < 2; phase++)
hipLaunchKernelGGL(( smooth), dim3(grid), dim3(block), 0, 0, __z[level],
__r[level],
__f[level],
dim_x,
dim_y,
dim_z,
phase);
}
}
}
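    // calc_dot: dot product restricted to fluid cells -- both operands are regularized (non-fluid entries
    // zeroed) before a full-grid hipblasSdot, so non-fluid cells contribute nothing to the sum.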
float calc_dot(grid_cell<float>& a, grid_cell<float>& b)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, a, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, b, __f[0], nx, ny, nz);
float dot;
hipblasSdot(
cublasHandle, nx * ny * nz, a.get_ptr(), 1, b.get_ptr(), 1, &dot);
return dot;
}
void calc_Ap(grid_cell<float>& Ap,
grid_cell<float>& p,
CONST int nx,
CONST int ny,
CONST int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, p, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( calc_Ap_kernel), dim3(grid), dim3(block), 0, 0, Ap, p, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, Ap, __f[0], nx, ny, nz);
}
void calc_saxpy(grid_cell<float>& x,
grid_cell<float>& y,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, x, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, y, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( calc_saxpy_kernel), dim3(grid), dim3(block), 0, 0, x, y, __f[0], a, nx, ny, nz);
}
void calc_sxpay(grid_cell<float>& x,
grid_cell<float>& y,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, x, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, y, __f[0], nx, ny, nz);
hipLaunchKernelGGL(( calc_sxpay_kernel), dim3(grid), dim3(block), 0, 0, x, y, __f[0], a, nx, ny, nz);
}
void solve(grid_cell<float>& d_pressure,
grid_cell<float>& d_rhs,
grid_cell<float>& d_sdistance,
grid_cell<char>& d_fluid_flag,
grid_cell<float>& d_temp_0,
grid_cell<float>& d_temp_1,
grid_cell<float>& d_temp_2,
const int nx,
const int ny,
const int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
float r0, r1, alpha, beta;
float dot, nalpha;
int k;
const int max_iter = 1000;
const float tol = 1e-5f;
int precon = 2;
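        // precon selects the preconditioner: 0 = none (z := r, plain CG),
        // 2 = the multigrid V-cycle in apply_preconditioner(); any other value
        // is rejected below.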
// Profiler _p1;
prepare_preconditioner(d_rhs, d_temp_2, d_fluid_flag);
auto& __x = d_pressure;
auto& __p = d_temp_0;
auto& __Ap = d_temp_1;
__x.clear_gpu();
if (precon == 2)
{
for (int level = 1; level < n_mg_levels; level++)
{
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
hipLaunchKernelGGL(( downsample_f), dim3(grid), dim3(block), 0, 0,
__f[level - 1], __f[level], dim_x, dim_y, dim_z);
}
}
//////////////////////////////////////////////////////////////////////////
// in case the rhs is zero when all fluid is free falling
{
// r' * r
dot = calc_dot(__r[0], __r[0]);
if (dot <= tol * tol)
{
return;
}
}
hipsparseMatDescr_t descr = 0;
checkCudaErrors(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
printf("\tConvergence of conjugate gradient: \n");
//////////////////////////////////////////////////////////////////////////
if (precon == 0)
{
// z := r
__z[0].copy_from_gpu(__r[0]);
}
else if (precon == 2)
{
apply_preconditioner();
}
else
{
printf("invalid precon\n");
throw std::runtime_error("invalid precon");
}
// p := z
__p.copy_from_gpu(__z[0]);
// r' * z
r1 = calc_dot(__r[0], __z[0]);
k = 0;
while (k++ < max_iter)
{
// A * p
calc_Ap(__Ap, __p, nx, ny, nz);
// p' * A * p
dot = calc_dot(__p, __Ap);
alpha = r1 / dot;
// x + a * p
calc_saxpy(__p, __x, alpha, nx, ny, nz);
nalpha = -alpha;
// r - a * A * p
calc_saxpy(__Ap, __r[0], nalpha, nx, ny, nz);
// r' * r
dot = calc_dot(__r[0], __r[0]);
if (dot <= tol * tol) break;
if (precon == 0)
{
// z := r
__z[0].copy_from_gpu(__r[0]);
}
else if (precon == 2)
{
apply_preconditioner();
}
else
{
printf("invalid precon\n");
throw std::runtime_error("invalid precon");
}
r0 = r1;
// r' * z
r1 = calc_dot(__r[0], __z[0]);
beta = r1 / r0;
// z + b * p
calc_sxpay(__z[0], __p, beta, nx, ny, nz);
}
__sync();
printf("\titeration = %3d, residual = %e \n", k, sqrt(r1));
//////////////////////////////////////////////////////////////////////////
hipLaunchKernelGGL(( regularize), dim3(grid), dim3(block), 0, 0, __x, __f[0], nx, ny, nz);
}
    }; // class MGPCGSolver
void pcg_solve_poisson_gpu(grid_cell<float>& d_pressure,
grid_cell<float>& d_rhs,
grid_cell<float>& d_sdistance,
grid_cell<char>& d_fluid_flag,
grid_cell<float>& d_temp_buffer_0,
grid_cell<float>& d_temp_buffer_1,
grid_cell<float>& d_temp_buffer_2,
const int nx,
const int ny,
const int nz)
{
static MGPCGSolver solver(nx, ny, nz);
solver.solve(d_pressure,
d_rhs,
d_sdistance,
d_fluid_flag,
d_temp_buffer_0,
d_temp_buffer_1,
d_temp_buffer_2,
nx,
ny,
nz);
}
} // namespace pcg
| 9320a81f1564e1ef31a2b639ca86c7e01f7da3c1.cu | #include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/transform_reduce.h>
#include "common.h"
#include "mgpcg.h"
namespace pcg
{
__host__ __device__ bool is_fluid(
CONST grid_cell<char>& f, int i, int j, int k, int nx, int ny, int nz) CONST
{
return i >= 0 && i < nx && j >= 0 && j < ny && k >= 0 && k < nz &&
LIQUID == f.get(i, j, k);
}
__host__ __device__ bool is_solid(
CONST grid_cell<char>& f, int i, int j, int k, int nx, int ny, int nz) CONST
{
return i < 0 || i >= nx || j < 0 || j >= ny || k < 0 || k >= nz ||
SOLID == f.get(i, j, k);
}
__host__ __device__ bool is_air(
CONST grid_cell<char>& f, int i, int j, int k, int nx, int ny, int nz) CONST
{
return i >= 0 && i < nx && j >= 0 && j < ny && k >= 0 && k < nz &&
AIR == f.get(i, j, k);
}
__host__ __device__ float neighbor_sum(CONST grid_cell<float>& Lxn,
CONST grid_cell<float>& Lxp,
CONST grid_cell<float>& Lyn,
CONST grid_cell<float>& Lyp,
CONST grid_cell<float>& Lzn,
CONST grid_cell<float>& Lzp,
CONST grid_cell<float>& x,
CONST grid_cell<char>& f,
int i,
int j,
int k,
int nx,
int ny,
int nz) CONST
{
int in = (i - 1 + nx) % nx;
int ip = (i + 1) % nx;
int jn = (j - 1 + ny) % ny;
int jp = (j + 1) % ny;
int kn = (k - 1 + nz) % nz;
int kp = (k + 1) % nz;
return x.get(in, j, k) * Lxn.get(i, j, k) +
x.get(ip, j, k) * Lxp.get(i, j, k) +
x.get(i, jn, k) * Lyn.get(i, j, k) +
x.get(i, jp, k) * Lyp.get(i, j, k) +
x.get(i, j, kn) * Lzn.get(i, j, k) +
x.get(i, j, kp) * Lzp.get(i, j, k);
}
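// init_L precomputes the 7-point Laplacian stencil per cell: the diagonal counts the
// non-solid neighbours of a fluid cell and the six off-diagonal weights are 1 for
// fluid neighbours, 0 otherwise. The get_L* helpers below compute the same values
// on the fly without storing them.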
__global__ void init_L(CONST grid_cell<char> f,
grid_cell<float> L_diag,
grid_cell<float> L_diag_inv,
grid_cell<float> Lxn,
grid_cell<float> Lxp,
grid_cell<float> Lyn,
grid_cell<float> Lyp,
grid_cell<float> Lzn,
grid_cell<float> Lzp,
int nx,
int ny,
int nz)
{
KERNAL_CONFIG
// int nx = f.get_nx();
// int ny = f.get_ny();
// int nz = f.get_nz();
if (i < nx && j < ny && k < nz)
{
if (LIQUID == f.get(i, j, k))
{
float s = 6.0f;
s -= float(is_solid(f, i - 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i + 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i, j - 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j + 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j, k - 1, nx, ny, nz));
s -= float(is_solid(f, i, j, k + 1, nx, ny, nz));
L_diag.get(i, j, k) = s;
L_diag_inv.get(i, j, k) = 1.0f / s;
}
Lxn.get(i, j, k) = float(is_fluid(f, i - 1, j, k, nx, ny, nz));
Lxp.get(i, j, k) = float(is_fluid(f, i + 1, j, k, nx, ny, nz));
Lyn.get(i, j, k) = float(is_fluid(f, i, j - 1, k, nx, ny, nz));
Lyp.get(i, j, k) = float(is_fluid(f, i, j + 1, k, nx, ny, nz));
Lzn.get(i, j, k) = float(is_fluid(f, i, j, k - 1, nx, ny, nz));
Lzp.get(i, j, k) = float(is_fluid(f, i, j, k + 1, nx, ny, nz));
}
}
__host__ __device__ float get_Ldiag(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
float s = 6.0f;
s -= float(is_solid(f, i - 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i + 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i, j - 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j + 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j, k - 1, nx, ny, nz));
s -= float(is_solid(f, i, j, k + 1, nx, ny, nz));
return s;
}
__host__ __device__ float get_Ldiaginv(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
float s = 6.0f;
s -= float(is_solid(f, i - 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i + 1, j, k, nx, ny, nz));
s -= float(is_solid(f, i, j - 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j + 1, k, nx, ny, nz));
s -= float(is_solid(f, i, j, k - 1, nx, ny, nz));
s -= float(is_solid(f, i, j, k + 1, nx, ny, nz));
return 1.0f / s;
}
__host__ __device__ float get_Lxn(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i - 1, j, k, nx, ny, nz));
}
__host__ __device__ float get_Lxp(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i + 1, j, k, nx, ny, nz));
}
__host__ __device__ float get_Lyn(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j - 1, k, nx, ny, nz));
}
__host__ __device__ float get_Lyp(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j + 1, k, nx, ny, nz));
}
__host__ __device__ float get_Lzn(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j, k - 1, nx, ny, nz));
}
__host__ __device__ float get_Lzp(
CONST grid_cell<char> f, int i, int j, int k, int nx, int ny, int nz)
{
return float(is_fluid(f, i, j, k + 1, nx, ny, nz));
}
__host__ __device__ float neighbor_sum(CONST grid_cell<float>& x,
CONST grid_cell<char>& f,
int i,
int j,
int k,
int nx,
int ny,
int nz) CONST
{
int in = (i - 1 + nx) % nx;
int ip = (i + 1) % nx;
int jn = (j - 1 + ny) % ny;
int jp = (j + 1) % ny;
int kn = (k - 1 + nz) % nz;
int kp = (k + 1) % nz;
return x.get(in, j, k) * get_Lxn(f, i, j, k, nx, ny, nz) +
x.get(ip, j, k) * get_Lxp(f, i, j, k, nx, ny, nz) +
x.get(i, jn, k) * get_Lyn(f, i, j, k, nx, ny, nz) +
x.get(i, jp, k) * get_Lyp(f, i, j, k, nx, ny, nz) +
x.get(i, j, kn) * get_Lzn(f, i, j, k, nx, ny, nz) +
x.get(i, j, kp) * get_Lzp(f, i, j, k, nx, ny, nz);
}
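// smooth performs one half of a red-black Gauss-Seidel sweep: only fluid cells whose
// parity (i + j + k) % 2 equals `phase` are updated, so the two phases together
// relax every fluid cell once without read/write conflicts.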
__global__ void smooth(grid_cell<float> z,
grid_cell<float> r,
grid_cell<char> f,
CONST int nx,
CONST int ny,
CONST int nz,
int phase)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && (i + j + k) % 2 == phase &&
LIQUID == f.get(i, j, k))
{
float rhs = r.get(i, j, k);
rhs += neighbor_sum(z, f, i, j, k, nx, ny, nz);
z.get(i, j, k) = rhs / get_Ldiag(f, i, j, k, nx, ny, nz);
}
}
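// regularize zeroes a field in every non-fluid cell so that dense reductions over the
// full nx*ny*nz grid (e.g. the cuBLAS dot products) only see fluid degrees of freedom.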
__global__ void regularize(
grid_cell<float> data, grid_cell<char> f, int nx, int ny, int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID != f.get(i, j, k))
{
data.get(i, j, k) = 0.0f;
}
}
__global__ void formPoisson_count_nonzero(CONST grid_cell<char> fluid_flag,
CONST grid_cell<int> cell_index,
int* count)
{
KERNAL_CONFIG
// build consistent ordering
int nx = fluid_flag.get_nx();
int ny = fluid_flag.get_ny();
int nz = fluid_flag.get_nz();
if (i < nx && j < ny && k < nz)
{
if (LIQUID == fluid_flag.get(i, j, k))
{
int cid = cell_index.get(i, j, k);
int c = 1;
c += is_fluid(fluid_flag, i, j, k - 1, nx, ny, nz);
c += is_fluid(fluid_flag, i, j - 1, k, nx, ny, nz);
c += is_fluid(fluid_flag, i - 1, j, k, nx, ny, nz);
c += is_fluid(fluid_flag, i + 1, j, k, nx, ny, nz);
c += is_fluid(fluid_flag, i, j + 1, k, nx, ny, nz);
c += is_fluid(fluid_flag, i, j, k + 1, nx, ny, nz);
count[cid] = c;
}
}
}
__global__ void formPoisson_build_matrix(CONST grid_cell<char> fluid_flag,
CONST grid_cell<int> cell_index,
CONST int* I,
int* J,
float* val)
{
KERNAL_CONFIG
// build consistent ordering
int nx = fluid_flag.get_nx();
int ny = fluid_flag.get_ny();
int nz = fluid_flag.get_nz();
if (i < nx && j < ny && k < nz)
{
if (LIQUID == fluid_flag.get(i, j, k))
{
int cid = cell_index.get(i, j, k);
int NZ = I[cid];
if (is_fluid(fluid_flag, i, j, k - 1, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j, k - 1);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i, j - 1, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j - 1, k);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i - 1, j, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i - 1, j, k);
val[NZ] = -1.0f;
NZ++;
}
{
J[NZ] = cell_index.get(i, j, k);
val[NZ] = get_Ldiag(fluid_flag, i, j, k, nx, ny, nz);
NZ++;
}
if (is_fluid(fluid_flag, i + 1, j, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i + 1, j, k);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i, j + 1, k, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j + 1, k);
val[NZ] = -1.0f;
NZ++;
}
if (is_fluid(fluid_flag, i, j, k + 1, nx, ny, nz))
{
J[NZ] = cell_index.get(i, j, k + 1);
val[NZ] = -1.0f;
NZ++;
}
}
}
}
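// downsample_f coarsens the cell flags for the next multigrid level: each coarse cell
// covers a 2x2x2 block of fine cells and becomes AIR if any fine cell is AIR, else
// LIQUID if any fine cell is LIQUID, else SOLID.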
__global__ void downsample_f(
grid_cell<char> f_fine, grid_cell<char> f_coarse, int nx, int ny, int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz)
{
int i2 = i * 2;
int j2 = j * 2;
int k2 = k * 2;
if (AIR == f_fine.get(i2, j2, k2) || //
AIR == f_fine.get(i2 + 1, j2, k2) || //
AIR == f_fine.get(i2, j2 + 1, k2) || //
AIR == f_fine.get(i2 + 1, j2 + 1, k2) || //
AIR == f_fine.get(i2, j2, k2 + 1) || //
AIR == f_fine.get(i2 + 1, j2, k2 + 1) || //
AIR == f_fine.get(i2, j2 + 1, k2 + 1) || //
AIR == f_fine.get(i2 + 1, j2 + 1, k2 + 1))
{
f_coarse.get(i, j, k) = AIR;
}
else if (LIQUID == f_fine.get(i2, j2, k2) || //
LIQUID == f_fine.get(i2 + 1, j2, k2) || //
LIQUID == f_fine.get(i2, j2 + 1, k2) || //
LIQUID == f_fine.get(i2 + 1, j2 + 1, k2) || //
LIQUID == f_fine.get(i2, j2, k2 + 1) || //
LIQUID == f_fine.get(i2 + 1, j2, k2 + 1) || //
LIQUID == f_fine.get(i2, j2 + 1, k2 + 1) || //
LIQUID == f_fine.get(i2 + 1, j2 + 1, k2 + 1))
{
f_coarse.get(i, j, k) = LIQUID;
}
else
{
f_coarse.get(i, j, k) = SOLID;
}
}
}
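// restrict_ builds the coarse right-hand side: it evaluates the fine-level residual
// r - A*z in every fluid cell and accumulates it into the covering coarse cell with
// weight 0.5 (atomicAdd, since eight fine cells map to one coarse cell).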
__global__ void restrict_(grid_cell<float> r_fine,
grid_cell<char> f_fine,
grid_cell<float> z_fine,
grid_cell<float> r_coarse,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f_fine.get(i, j, k))
{
float Az = get_Ldiag(f_fine, i, j, k, nx, ny, nz) * z_fine.get(i, j, k);
Az -= neighbor_sum(z_fine, f_fine, i, j, k, nx, ny, nz);
float res = r_fine.get(i, j, k) - Az;
atomicAdd(&r_coarse.get(i / 2, j / 2, k / 2), res * 0.5f);
}
}
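// prolongate adds the coarse-level correction back to the fine level by
// piecewise-constant injection: every fine cell picks up the value of the coarse
// cell containing it.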
__global__ void prolongate(grid_cell<float> z_fine,
grid_cell<float> z_coarse,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz)
{
z_fine.get(i, j, k) += z_coarse.get(i / 2, j / 2, k / 2);
}
}
__global__ void calc_Ap_kernel(grid_cell<float> Ap,
grid_cell<float> p,
grid_cell<char> f,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f.get(i, j, k))
{
float _Ap = get_Ldiag(f, i, j, k, nx, ny, nz) * p.get(i, j, k);
_Ap -= neighbor_sum(p, f, i, j, k, nx, ny, nz);
Ap.get(i, j, k) = _Ap;
}
}
__global__ void calc_saxpy_kernel(grid_cell<float> x,
grid_cell<float> y,
grid_cell<char> f,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f.get(i, j, k))
{
y.get(i, j, k) += a * x.get(i, j, k);
}
}
__global__ void calc_sxpay_kernel(grid_cell<float> x,
grid_cell<float> y,
grid_cell<char> f,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
KERNAL_CONFIG
if (i < nx && j < ny && k < nz && LIQUID == f.get(i, j, k))
{
y.get(i, j, k) = x.get(i, j, k) + a * y.get(i, j, k);
}
}
class MGPCGSolver
{
protected:
int nx, ny, nz;
int max_iters;
int n_mg_levels;
int n_pre_and_pose_smoothing;
int n_bottom_smoothing;
bool use_precon;
std::vector<grid_cell<float>> __r;
std::vector<grid_cell<float>> __z;
std::vector<grid_cell<char>> __f;
cublasHandle_t cublasHandle = 0;
cusparseHandle_t cusparseHandle = 0;
public:
MGPCGSolver(int nx_, int ny_, int nz_) : nx(nx_), ny(ny_), nz(nz_)
{
max_iters = 100;
// n_mg_levels = 4;
        n_mg_levels = 5; // reduces to RBGS (red-black Gauss-Seidel) preconditioning if set to 1
// for low res grid
// n_pre_and_pose_smoothing = 2;
// n_bottom_smoothing = 10;
// for high res grid
n_pre_and_pose_smoothing = 4;
n_bottom_smoothing = 30;
use_precon = true;
auto get_res = [this](int level) {
return make_int3(
nx / (1 << level), ny / (1 << level), nz / (1 << level));
};
__r.resize(n_mg_levels);
__z.resize(n_mg_levels);
__f.resize(n_mg_levels);
// no level 0
for (int l = 1; l < n_mg_levels; l++)
{
auto res = get_res(l);
__r[l].init_gpu(res.x, res.y, res.z);
__z[l].init_gpu(res.x, res.y, res.z);
__f[l].init_gpu(res.x, res.y, res.z);
}
checkCudaErrors(cublasCreate(&cublasHandle));
checkCudaErrors(cusparseCreate(&cusparseHandle));
}
~MGPCGSolver()
{
// no level 0
for (int l = 1; l < n_mg_levels; l++)
{
__r[l].free_gpu();
__z[l].free_gpu();
__f[l].free_gpu();
}
cublasDestroy(cublasHandle);
cusparseDestroy(cusparseHandle);
}
void prepare_preconditioner(grid_cell<float>& r0_buffer,
grid_cell<float>& z0_buffer,
grid_cell<char>& f0_buffer)
{
//__r[0].init_ref_gpu(nx, ny, nz, r0_buffer.get_ptr());
//__z[0].init_ref_gpu(nx, ny, nz, z0_buffer.get_ptr());
//__f[0].init_ref_gpu(nx, ny, nz, f0_buffer.get_ptr());
__r[0] = r0_buffer.cast<grid_cell<float>>(nx, ny, nz);
__z[0] = z0_buffer.cast<grid_cell<float>>(nx, ny, nz);
__f[0] = f0_buffer.cast<grid_cell<char>>(nx, ny, nz);
}
void apply_preconditioner()
{
dim3 block(8, 8, 8);
__z[0].clear_gpu();
// pre smoothing
for (int level = 0; level < n_mg_levels - 1; level++)
{
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
for (int i = 0; i < n_pre_and_pose_smoothing; i++)
{
for (int phase = 0; phase < 2; phase++)
smooth<<<grid, block>>>(__z[level],
__r[level],
__f[level],
dim_x,
dim_y,
dim_z,
phase);
}
__z[level + 1].clear_gpu();
__r[level + 1].clear_gpu();
restrict_<<<grid, block>>>(__r[level],
__f[level],
__z[level],
__r[level + 1],
dim_x,
dim_y,
dim_z);
}
// bottom smoothing
{
int halfcount = n_bottom_smoothing / 2;
int level = n_mg_levels - 1;
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
for (int order = 0; order < 2; order++)
{
for (int i = 0; i < halfcount; i++)
{
for (int phase = 0; phase < 2; phase++)
smooth<<<grid, block>>>(__z[level],
__r[level],
__f[level],
dim_x,
dim_y,
dim_z,
(phase + order) % 2);
}
}
}
// post smoothing
for (int level = n_mg_levels - 2; level >= 0; level--)
{
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
prolongate<<<grid, block>>>(
__z[level], __z[level + 1], dim_x, dim_y, dim_z);
for (int i = 0; i < n_pre_and_pose_smoothing; i++)
{
for (int phase = 0; phase < 2; phase++)
smooth<<<grid, block>>>(__z[level],
__r[level],
__f[level],
dim_x,
dim_y,
dim_z,
phase);
}
}
}
float calc_dot(grid_cell<float>& a, grid_cell<float>& b)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
regularize<<<grid, block>>>(a, __f[0], nx, ny, nz);
regularize<<<grid, block>>>(b, __f[0], nx, ny, nz);
float dot;
cublasSdot(
cublasHandle, nx * ny * nz, a.get_ptr(), 1, b.get_ptr(), 1, &dot);
return dot;
}
void calc_Ap(grid_cell<float>& Ap,
grid_cell<float>& p,
CONST int nx,
CONST int ny,
CONST int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
regularize<<<grid, block>>>(p, __f[0], nx, ny, nz);
calc_Ap_kernel<<<grid, block>>>(Ap, p, __f[0], nx, ny, nz);
regularize<<<grid, block>>>(Ap, __f[0], nx, ny, nz);
}
void calc_saxpy(grid_cell<float>& x,
grid_cell<float>& y,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
regularize<<<grid, block>>>(x, __f[0], nx, ny, nz);
regularize<<<grid, block>>>(y, __f[0], nx, ny, nz);
calc_saxpy_kernel<<<grid, block>>>(x, y, __f[0], a, nx, ny, nz);
}
void calc_sxpay(grid_cell<float>& x,
grid_cell<float>& y,
const float a,
CONST int nx,
CONST int ny,
CONST int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
regularize<<<grid, block>>>(x, __f[0], nx, ny, nz);
regularize<<<grid, block>>>(y, __f[0], nx, ny, nz);
calc_sxpay_kernel<<<grid, block>>>(x, y, __f[0], a, nx, ny, nz);
}
void solve(grid_cell<float>& d_pressure,
grid_cell<float>& d_rhs,
grid_cell<float>& d_sdistance,
grid_cell<char>& d_fluid_flag,
grid_cell<float>& d_temp_0,
grid_cell<float>& d_temp_1,
grid_cell<float>& d_temp_2,
const int nx,
const int ny,
const int nz)
{
dim3 block(8, 8, 8);
dim3 grid(divUp(nx, block.x), divUp(ny, block.y), divUp(nz, block.z));
float r0, r1, alpha, beta;
float dot, nalpha;
int k;
const int max_iter = 1000;
const float tol = 1e-5f;
int precon = 2;
// Profiler _p1;
prepare_preconditioner(d_rhs, d_temp_2, d_fluid_flag);
auto& __x = d_pressure;
auto& __p = d_temp_0;
auto& __Ap = d_temp_1;
__x.clear_gpu();
if (precon == 2)
{
for (int level = 1; level < n_mg_levels; level++)
{
int dim_x = nx / (1 << level);
int dim_y = ny / (1 << level);
int dim_z = nz / (1 << level);
dim3 grid(divUp(dim_x, block.x),
divUp(dim_y, block.y),
divUp(dim_z, block.z));
downsample_f<<<grid, block>>>(
__f[level - 1], __f[level], dim_x, dim_y, dim_z);
}
}
//////////////////////////////////////////////////////////////////////////
// in case the rhs is zero when all fluid is free falling
{
// r' * r
dot = calc_dot(__r[0], __r[0]);
if (dot <= tol * tol)
{
return;
}
}
cusparseMatDescr_t descr = 0;
checkCudaErrors(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
printf("\tConvergence of conjugate gradient: \n");
//////////////////////////////////////////////////////////////////////////
if (precon == 0)
{
// z := r
__z[0].copy_from_gpu(__r[0]);
}
else if (precon == 2)
{
apply_preconditioner();
}
else
{
printf("invalid precon\n");
throw std::runtime_error("invalid precon");
}
// p := z
__p.copy_from_gpu(__z[0]);
// r' * z
r1 = calc_dot(__r[0], __z[0]);
k = 0;
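        // Standard preconditioned CG update:
        //   alpha = (r'z) / (p'Ap),  x += alpha p,  r -= alpha A p,
        //   beta  = (r'z)_new / (r'z)_old,  p = z + beta p,
        // with z recomputed from the selected preconditioner (z := r or the
        // multigrid V-cycle) every iteration.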
while (k++ < max_iter)
{
// A * p
calc_Ap(__Ap, __p, nx, ny, nz);
// p' * A * p
dot = calc_dot(__p, __Ap);
alpha = r1 / dot;
// x + a * p
calc_saxpy(__p, __x, alpha, nx, ny, nz);
nalpha = -alpha;
// r - a * A * p
calc_saxpy(__Ap, __r[0], nalpha, nx, ny, nz);
// r' * r
dot = calc_dot(__r[0], __r[0]);
if (dot <= tol * tol) break;
if (precon == 0)
{
// z := r
__z[0].copy_from_gpu(__r[0]);
}
else if (precon == 2)
{
apply_preconditioner();
}
else
{
printf("invalid precon\n");
throw std::runtime_error("invalid precon");
}
r0 = r1;
// r' * z
r1 = calc_dot(__r[0], __z[0]);
beta = r1 / r0;
// z + b * p
calc_sxpay(__z[0], __p, beta, nx, ny, nz);
}
__sync();
printf("\titeration = %3d, residual = %e \n", k, sqrt(r1));
//////////////////////////////////////////////////////////////////////////
regularize<<<grid, block>>>(__x, __f[0], nx, ny, nz);
}
    }; // class MGPCGSolver
void pcg_solve_poisson_gpu(grid_cell<float>& d_pressure,
grid_cell<float>& d_rhs,
grid_cell<float>& d_sdistance,
grid_cell<char>& d_fluid_flag,
grid_cell<float>& d_temp_buffer_0,
grid_cell<float>& d_temp_buffer_1,
grid_cell<float>& d_temp_buffer_2,
const int nx,
const int ny,
const int nz)
{
static MGPCGSolver solver(nx, ny, nz);
solver.solve(d_pressure,
d_rhs,
d_sdistance,
d_fluid_flag,
d_temp_buffer_0,
d_temp_buffer_1,
d_temp_buffer_2,
nx,
ny,
nz);
}
} // namespace pcg
|
ed00604156403ecae667c83c7f3e118bab46c9f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "gemm_utils.h"
void checkKernelsErrors(const char *prefix, const char *postfix)
{
hipDeviceSynchronize();
if(hipPeekAtLastError() != hipSuccess){
printf("\n%s Line %d: %s %s\n", prefix, __LINE__,
hipGetErrorString(hipGetLastError()),
postfix);
hipDeviceReset();
exit(1);
}
return;
}
| ed00604156403ecae667c83c7f3e118bab46c9f0.cu | #include "gemm_utils.h"
void checkKernelsErrors(const char *prefix, const char *postfix)
{
cudaDeviceSynchronize();
if(cudaPeekAtLastError() != cudaSuccess){
printf("\n%s Line %d: %s %s\n", prefix, __LINE__,
cudaGetErrorString(cudaGetLastError()),
postfix);
cudaDeviceReset();
exit(1);
}
return;
}
|
8115b6f42593a7e977699d9b77b03a15bd8b2a6d.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @internal
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* [email protected]
* @date August, 2017
* @version v2
*
* @copyright Copyright 2017 cuStinger. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Dynamic/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
namespace hornet_alg {
KatzCentralityDynamic::KatzCentralityDynamic(HornetGPU& hornet,
HornetGPU& inverted_graph,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(false),
kc_static(hornet, max_iteration, K,
max_degree, false) {
hd_katzdata().active_queue.initilize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
              << "Oded need to figure out correct API for dynamic graph "
              << "algorithms\n"
              << "Dynamic katz centrality algorithm needs to get both the "
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::KatzCentralityDynamic(HornetGPU& hornet,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(true),
kc_static(inverted_graph, max_iteration, K,
max_degree, true) {
hd_katzdata().active_queue.initilize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
              << "Oded need to figure out correct API for dynamic graph "
              << "algorithms\n"
              << "Dynamic katz centrality algorithm needs to get both the "
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::~KatzCentralityDynamic() {
release();
}
void KatzCentralityDynamic::run_static() {
// Executing the static graph algorithm
kc_static.reset();
kc_static.run();
hd_katzdata().iteration_static = hd_katzdata().iteration;
// Initializing the fields of the dynamic graph algorithm
forAllnumV(hornet, InitStreaming { hd_katzdata } );
}
void KatzCentralityDynamic::release(){
gpu::free(hd_katzdata().new_paths_curr);
gpu::free(hd_katzdata().new_paths_prev);
gpu::free(hd_katzdata().active);
}
//==============================================================================
void KatzCentralityDynamic::processUpdate(BatchUpdate& batch_update,
bool is_insert) {
// Resetting the queue of the active vertices.
hd_katzdata().active_queue.clear();
hd_katzdata().iteration = 1;
// Initialization of insertions or deletions is slightly different.
if (is_insert)
forAllEdges(hornet, batch_update, SetupInsertions { hd_katzdata });
else
forAllEdges(hornet, batch_update, SetupDeletions { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration = 2;
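    // Re-propagate path counts only for the vertices touched by the batch,
    // level by level; the streaming update never needs more iterations than the
    // static run performed, hence the extra cap at iteration_static.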
while (hd_katzdata().iteration < hd_katzdata().max_iteration &&
hd_katzdata().iteration < hd_katzdata().iteration_static) {
hd_katzdata().alphaI = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active
InitActiveNewPaths { hd_katzdata });
// Undirected graphs and directed graphs need to be dealt with differently.
if (!is_directed) {
forAllEdges(hornet, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync(); // Syncing queue info
forAllEdges(hornet, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata },
load_balacing );
}
else {
forAllEdges(inverted_graph, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync();
forAllEdges(inverted_graph, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata }, load_balacing);
}
hd_katzdata.sync(); // Syncing queue info
// Checking if we are dealing with a batch of insertions or deletions.
if (is_insert) {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchInsert { hd_katzdata });
}
else {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchDelete { hd_katzdata });
}
hd_katzdata.sync();
forAll(hd_katzdata().active_queue, UpdatePrevWithCurr { hd_katzdata });
hd_katzdata.sync();
hd_katzdata().iteration++;
}
if (hd_katzdata().iteration > 2) {
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active?
UpdateLastIteration { hd_katzdata } );
hd_katzdata.sync();
}
// Resetting the fields of the dynamic graph algorithm for all the vertices
// that were active
//hd_katzdata().num_active ??
forAll(hd_katzdata().active_queue, InitStreaming {hd_katzdata});
}
//------------------------------------------------------------------------------
int KatzCentralityDynamic::get_iteration_count(){
return hd_katzdata().iteration;
}
void KatzCentralityDynamic::batchUpdateInserted(BatchUpdate &batch_update) {
processUpdate(batch_update, true);
}
void KatzCentralityDynamic::batchUpdateDeleted(BatchUpdate &batch_update) {
processUpdate(batch_update, false);
}
void KatzCentralityDynamic::copyKCToHost(double* host_array) {
kc_static.copyKCToHost(host_array);
}
void KatzCentralityDynamic::copyNumPathsToHost(ulong_t* host_array) {
kc_static.copyNumPathsToHost(host_array);
}
} // namespace hornet_alg
| 8115b6f42593a7e977699d9b77b03a15bd8b2a6d.cu | /**
* @internal
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* [email protected]
* @date August, 2017
* @version v2
*
* @copyright Copyright © 2017 cuStinger. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Dynamic/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
namespace hornet_alg {
KatzCentralityDynamic::KatzCentralityDynamic(HornetGPU& hornet,
HornetGPU& inverted_graph,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(false),
kc_static(hornet, max_iteration, K,
max_degree, false) {
hd_katzdata().active_queue.initilize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
              << "Oded need to figure out correct API for dynamic graph "
              << "algorithms\n"
              << "Dynamic katz centrality algorithm needs to get both the "
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::KatzCentralityDynamic(HornetGPU& hornet,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(true),
kc_static(inverted_graph, max_iteration, K,
max_degree, true) {
hd_katzdata().active_queue.initilize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
              << "Oded need to figure out correct API for dynamic graph "
              << "algorithms\n"
              << "Dynamic katz centrality algorithm needs to get both the "
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::~KatzCentralityDynamic() {
release();
}
void KatzCentralityDynamic::run_static() {
// Executing the static graph algorithm
kc_static.reset();
kc_static.run();
hd_katzdata().iteration_static = hd_katzdata().iteration;
// Initializing the fields of the dynamic graph algorithm
forAllnumV(hornet, InitStreaming { hd_katzdata } );
}
void KatzCentralityDynamic::release(){
gpu::free(hd_katzdata().new_paths_curr);
gpu::free(hd_katzdata().new_paths_prev);
gpu::free(hd_katzdata().active);
}
//==============================================================================
void KatzCentralityDynamic::processUpdate(BatchUpdate& batch_update,
bool is_insert) {
// Resetting the queue of the active vertices.
hd_katzdata().active_queue.clear();
hd_katzdata().iteration = 1;
// Initialization of insertions or deletions is slightly different.
if (is_insert)
forAllEdges(hornet, batch_update, SetupInsertions { hd_katzdata });
else
forAllEdges(hornet, batch_update, SetupDeletions { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration = 2;
while (hd_katzdata().iteration < hd_katzdata().max_iteration &&
hd_katzdata().iteration < hd_katzdata().iteration_static) {
hd_katzdata().alphaI = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active
InitActiveNewPaths { hd_katzdata });
// Undirected graphs and directed graphs need to be dealt with differently.
if (!is_directed) {
forAllEdges(hornet, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync(); // Syncing queue info
forAllEdges(hornet, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata },
load_balacing );
}
else {
forAllEdges(inverted_graph, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync();
forAllEdges(inverted_graph, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata }, load_balacing);
}
hd_katzdata.sync(); // Syncing queue info
// Checking if we are dealing with a batch of insertions or deletions.
if (is_insert) {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchInsert { hd_katzdata });
}
else {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchDelete { hd_katzdata });
}
hd_katzdata.sync();
forAll(hd_katzdata().active_queue, UpdatePrevWithCurr { hd_katzdata });
hd_katzdata.sync();
hd_katzdata().iteration++;
}
if (hd_katzdata().iteration > 2) {
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active?
UpdateLastIteration { hd_katzdata } );
hd_katzdata.sync();
}
// Resetting the fields of the dynamic graph algorithm for all the vertices
// that were active
//hd_katzdata().num_active ??
forAll(hd_katzdata().active_queue, InitStreaming {hd_katzdata});
}
//------------------------------------------------------------------------------
int KatzCentralityDynamic::get_iteration_count(){
return hd_katzdata().iteration;
}
void KatzCentralityDynamic::batchUpdateInserted(BatchUpdate &batch_update) {
processUpdate(batch_update, true);
}
void KatzCentralityDynamic::batchUpdateDeleted(BatchUpdate &batch_update) {
processUpdate(batch_update, false);
}
void KatzCentralityDynamic::copyKCToHost(double* host_array) {
kc_static.copyKCToHost(host_array);
}
void KatzCentralityDynamic::copyNumPathsToHost(ulong_t* host_array) {
kc_static.copyNumPathsToHost(host_array);
}
} // namespace hornet_alg
|
fed5d47223e86c5dce9a2dfa64ebc6561188ca81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include<stdio.h>
__global__ void convolution_kernel(const uint8_t *d_source, uint8_t *d_target,
const int width, const int height,
const float *d_stancil,
const int st_width,
const int st_height)
{
//boundaries checking for width
if ((blockDim.x * blockIdx.x)+ threadIdx.x > (width-1))
{
return;
}
    //boundaries checking for height
if ((blockDim.y * blockIdx.y)+ threadIdx.y > (height-1))
{
return;
}
int x,y,localX,localY,final_idx,pixX,pixY, st_idx;
//lets compute the coordinate of the pixel we are computing
pixX = (blockIdx.x*blockDim.x) + threadIdx.x;
pixY = (blockIdx.y*blockDim.y) + threadIdx.y;
int idx = ((pixY *width) + (pixX)) *3;
//computing the center of the filter
int center_x = (int)(st_width/2.0);
int center_y = (int)(st_height/2.0);
//allocating/initializing color variables
float colorR = 0,colorG = 0,colorB = 0;
//looping the height of the filter
for (y=0; y<st_height; ++y)
{
localY = y - center_y;
        //looping the width of the filter
        for (x=0;x<st_width; ++x)
        {
            //lets compute where in the filter we are, computing local
            //coordinate from the center
localX = x - center_x;
//boundary check
if (( (localX + pixX) >= 0 && ((localX+pixX) < width)) &&
(localY+pixY >= 0 && ((localY+pixY) < height)))
{
//compute the final pixel to sample taking in to account
//the offset of the filter
final_idx = idx + ((localX*3) + (localY*width*3));
//compute the filter index buffer
st_idx = x+ (y*st_width);
colorR += float(d_source[final_idx])*d_stancil[st_idx];
colorG += float(d_source[final_idx+1])*d_stancil[st_idx];
colorB += float(d_source[final_idx+2])*d_stancil[st_idx];
}//end of stencil boundary checking
}//end of looping filter width
}//end of looping filter height
//setting the color to final buffer
d_target[idx] = (uint8_t)min(255.0f,max(0.0f,colorR));
d_target[idx+1] = (uint8_t)min(255.0f,max(0.0f,colorG));
d_target[idx+2] = (uint8_t)min(255.0f,max(0.0f,colorB));
}
void run_convolution_kernel( uint8_t *d_source, uint8_t *d_target,
const size_t width, const size_t height,
const float *d_stancil,
const size_t st_width,
const size_t st_height)
{
const int grainSize=16;
int width_blocks,width_height;
    //computing the number of thread blocks needed in each dimension
    width_blocks = ((width%grainSize) != 0)?(width/grainSize) +1: (width/grainSize);
    width_height = ((height%grainSize) != 0)?(height/grainSize) +1: (height/grainSize);
    //setting up the block and grid dimensions
const dim3 blockSize( grainSize, grainSize , 1);
const dim3 gridSize( width_blocks, width_height, 1);
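    //one thread is mapped to one output pixel; threads that fall outside the
    //image return early inside the kernel, so partially filled border blocks are safe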
//calling the actual kernel
hipLaunchKernelGGL(( convolution_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_source,
d_target,
width,
height,
d_stancil,
st_width,
st_height);
    //synchronizing the device
hipDeviceSynchronize();
//checking for error
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
} | fed5d47223e86c5dce9a2dfa64ebc6561188ca81.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include<stdio.h>
__global__ void convolution_kernel(const uint8_t *d_source, uint8_t *d_target,
const int width, const int height,
const float *d_stancil,
const int st_width,
const int st_height)
{
//boundaries checking for width
if ((blockDim.x * blockIdx.x)+ threadIdx.x > (width-1))
{
return;
}
    //boundaries checking for height
if ((blockDim.y * blockIdx.y)+ threadIdx.y > (height-1))
{
return;
}
int x,y,localX,localY,final_idx,pixX,pixY, st_idx;
//lets compute the coordinate of the pixel we are computing
pixX = (blockIdx.x*blockDim.x) + threadIdx.x;
pixY = (blockIdx.y*blockDim.y) + threadIdx.y;
int idx = ((pixY *width) + (pixX)) *3;
//computing the center of the filter
int center_x = (int)(st_width/2.0);
int center_y = (int)(st_height/2.0);
//allocating/initializing color variables
float colorR = 0,colorG = 0,colorB = 0;
//looping the height of the filter
for (y=0; y<st_height; ++y)
{
localY = y - center_y;
        //looping the width of the filter
        for (x=0;x<st_width; ++x)
        {
            //lets compute where in the filter we are, computing local
            //coordinate from the center
localX = x - center_x;
//boundary check
if (( (localX + pixX) >= 0 && ((localX+pixX) < width)) &&
(localY+pixY >= 0 && ((localY+pixY) < height)))
{
//compute the final pixel to sample taking in to account
//the offset of the filter
final_idx = idx + ((localX*3) + (localY*width*3));
//compute the filter index buffer
st_idx = x+ (y*st_width);
colorR += float(d_source[final_idx])*d_stancil[st_idx];
colorG += float(d_source[final_idx+1])*d_stancil[st_idx];
colorB += float(d_source[final_idx+2])*d_stancil[st_idx];
}//end of stencil boundary checking
}//end of looping filter width
}//end of looping filter height
//setting the color to final buffer
d_target[idx] = (uint8_t)min(255.0f,max(0.0f,colorR));
d_target[idx+1] = (uint8_t)min(255.0f,max(0.0f,colorG));
d_target[idx+2] = (uint8_t)min(255.0f,max(0.0f,colorB));
}
void run_convolution_kernel( uint8_t *d_source, uint8_t *d_target,
const size_t width, const size_t height,
const float *d_stancil,
const size_t st_width,
const size_t st_height)
{
const int grainSize=16;
int width_blocks,width_height;
    //computing the number of thread blocks needed in each dimension
    width_blocks = ((width%grainSize) != 0)?(width/grainSize) +1: (width/grainSize);
    width_height = ((height%grainSize) != 0)?(height/grainSize) +1: (height/grainSize);
    //setting up the block and grid dimensions
const dim3 blockSize( grainSize, grainSize , 1);
const dim3 gridSize( width_blocks, width_height, 1);
//calling the actual kernel
convolution_kernel<<<gridSize, blockSize>>>(d_source,
d_target,
width,
height,
d_stancil,
st_width,
st_height);
    //synchronizing the device
cudaDeviceSynchronize();
//checking for error
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
} |
784223576c2d61a74e018eca351c123bdb04fe57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .cuda.cu - Copyright 2019/2020 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core
{
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CUDAMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float4* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ PathState* pathStates;
__constant__ float4* debugData;
__constant__ mat4 worldToSky;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// staging: copies will be batched and carried out after rendering completes,
// to allow the CPU to update the scene concurrently with GPU rendering.
enum { INSTS = 0, MATS, ALGHTS, PLGHTS, SLGHTS, DLGHTS, LCNTS, RGB32, RGBH, NRMLS, SKYPIX, SKYW, SKYH, SMAT, DBGDAT, GEPS, CLMPV };
// device pointers are not real pointers for nvcc, so we need a bit of a hack.
struct StagedPtr { void* p; int id; };
struct StagedInt { int v; int id; };
struct StagedInt4 { int4 v; int id; };
struct StagedMat { mat4 v; int id; };
struct StagedF32 { float v; int id; };
struct StagedCpy { void* d; void* s; int n; };
static std::vector<StagedPtr> stagedPtr;
static std::vector<StagedInt> stagedInt;
static std::vector<StagedInt4> stagedInt4;
static std::vector<StagedMat> stagedMat;
static std::vector<StagedF32> stagedF32;
static std::vector<StagedCpy> stagedCpy;
__host__ static void pushPtrCpy( int id, void* p )
{
if (id == INSTS) hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) );
if (id == MATS) hipMemcpyToSymbol( materials, &p, sizeof( void* ) );
if (id == ALGHTS) hipMemcpyToSymbol( areaLights, &p, sizeof( void* ) );
if (id == PLGHTS) hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) );
if (id == SLGHTS) hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) );
if (id == DLGHTS) hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) );
if (id == RGB32) hipMemcpyToSymbol( argb32, &p, sizeof( void* ) );
if (id == RGBH) hipMemcpyToSymbol( argb128, &p, sizeof( void* ) );
if (id == NRMLS) hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) );
if (id == SKYPIX) hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) );
if (id == DBGDAT) hipMemcpyToSymbol( debugData, &p, sizeof( void* ) );
}
__host__ static void pushIntCpy( int id, const int v )
{
if (id == SKYW) hipMemcpyToSymbol( skywidth, &v, sizeof( int ) );
if (id == SKYH) hipMemcpyToSymbol( skyheight, &v, sizeof( int ) );
}
__host__ static void pushF32Cpy( int id, const float v )
{
if (id == GEPS) hipMemcpyToSymbol( geometryEpsilon, &v, sizeof( float ) );
	if (id == CLMPV) hipMemcpyToSymbol( clampValue, &v, sizeof( float ) );
}
__host__ static void pushMatCpy( int id, const mat4& m )
{
if (id == SMAT) hipMemcpyToSymbol( worldToSky, &m, sizeof( mat4 ) );
}
__host__ static void pushInt4Cpy( int id, const int4& v )
{
if (id == LCNTS) hipMemcpyToSymbol( lightCounts, &v, sizeof( int4 ) );
}
#define MAXVARS 32
static void* prevPtr[MAXVARS] = {};
static int prevInt[MAXVARS] = {};
static float prevFloat[MAXVARS] = {};
static int4 prevInt4[MAXVARS] = {};
static bool prevValSet[MAXVARS] = {};
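// the stage*Cpy helpers compare the new value against the cached previous one and
// only queue a copy when it actually changed, so unchanged symbols are not
// re-uploaded on every pushStagedCopies call.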
__host__ static void stagePtrCpy( int id, void* p )
{
if (prevPtr[id] == p) return; // not changed
StagedPtr n = { p, id };
stagedPtr.push_back( n );
prevPtr[id] = p;
}
__host__ static void stageIntCpy( int id, const int v )
{
if (prevValSet[id] == true && prevInt[id] == v) return;
StagedInt n = { v, id };
stagedInt.push_back( n );
prevValSet[id] = true;
prevInt[id] = v;
}
__host__ static void stageF32Cpy( int id, const float v )
{
if (prevValSet[id] == true && prevFloat[id] == v) return;
StagedF32 n = { v, id };
stagedF32.push_back( n );
prevValSet[id] = true;
prevFloat[id] = v;
}
__host__ static void stageMatCpy( int id, const mat4& m ) { StagedMat n = { m, id }; stagedMat.push_back( n ); }
__host__ static void stageInt4Cpy( int id, const int4& v )
{
if (prevValSet[id] == true && prevInt4[id].x == v.x && prevInt4[id].y == v.y && prevInt4[id].z == v.z && prevInt4[id].w == v.w) return;
StagedInt4 n = { v, id };
stagedInt4.push_back( n );
prevValSet[id] = true;
prevInt4[id] = v;
}
__host__ void stageMemcpy( void* d, void* s, int n ) { StagedCpy c = { d, s, n }; stagedCpy.push_back( c ); }
__host__ void stageInstanceDescriptors( CoreInstanceDesc* p ) { stagePtrCpy( INSTS /* instanceDescriptors */, p ); }
__host__ void stageMaterialList( CUDAMaterial* p ) { stagePtrCpy( MATS /* materials */, p ); }
__host__ void stageAreaLights( CoreLightTri* p ) { stagePtrCpy( ALGHTS /* areaLights */, p ); }
__host__ void stagePointLights( CorePointLight* p ) { stagePtrCpy( PLGHTS /* pointLights */, p ); }
__host__ void stageSpotLights( CoreSpotLight* p ) { stagePtrCpy( SLGHTS /* spotLights */, p ); }
__host__ void stageDirectionalLights( CoreDirectionalLight* p ) { stagePtrCpy( DLGHTS /* directionalLights */, p ); }
__host__ void stageARGB32Pixels( uint* p ) { stagePtrCpy( RGB32 /* argb32 */, p ); }
__host__ void stageARGB128Pixels( float4* p ) { stagePtrCpy( RGBH /* argb128 */, p ); }
__host__ void stageNRM32Pixels( uint* p ) { stagePtrCpy( NRMLS /* nrm32 */, p ); }
__host__ void stageSkyPixels( float4* p ) { stagePtrCpy( SKYPIX /* skyPixels */, p ); }
__host__ void stageSkySize( int w, int h ) { stageIntCpy( SKYW /* skywidth */, w ); stageIntCpy( SKYH /* skyheight */, h ); }
__host__ void stageWorldToSky( const mat4& worldToLight ) { stageMatCpy( SMAT /* worldToSky */, worldToLight ); }
__host__ void stageDebugData( float4* p ) { stagePtrCpy( DBGDAT /* debugData */, p ); }
__host__ void stageGeometryEpsilon( float e ) { stageF32Cpy( GEPS /* geometryEpsilon */, e ); }
__host__ void stageClampValue( float c ) { stageF32Cpy( CLMPV /* clampValue */, c ); }
__host__ void stageLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
stageInt4Cpy( LCNTS /* lightCounts */, counts );
}
__host__ void pushStagedCopies()
{
for (auto c : stagedCpy) hipMemcpy( c.d, c.s, c.n, hipMemcpyHostToDevice ); stagedCpy.clear();
for (auto n : stagedPtr) pushPtrCpy( n.id, n.p ); stagedPtr.clear();
for (auto n : stagedInt) pushIntCpy( n.id, n.v ); stagedInt.clear();
for (auto n : stagedInt4) pushInt4Cpy( n.id, n.v ); stagedInt4.clear();
for (auto n : stagedF32) pushF32Cpy( n.id, n.v ); stagedF32.clear();
for (auto n : stagedMat) pushMatCpy( n.id, n.v ); stagedMat.clear();
}
// counters for persistent threads
static __device__ Counters* counters;
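// the InitCounters* kernels below run with a single warp and only thread 0 writes,
// resetting the persistent-thread atomics and compaction counters before each
// tracing stage.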
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->activePaths = pathCount; // remaining active paths
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->generated = 0; // persistent thread atomic for generate in .optix.cu
counters->extensionRays = 0; // compaction counter for extension rays
counters->shadowRays = 0; // compaction counter for connections
counters->connected = 0;
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersForASStage2_Kernel()
{
if (threadIdx.x != 0) return;
counters->activePaths = 0; // will be filled by primary ray generation code
counters->shadowRays = 0; // compaction counter for connections
counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersForASStage2() { InitCountersForASStage2_Kernel << <1, 32 >> > (); }
__global__ void InitCountersSubsequent_Kernel()
{
if (threadIdx.x != 0) return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // remaining active paths
counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "camera.h"
#include "pathtracer.h"
#include "finalize_shared.h"
} // namespace lh2core
// EOF | 784223576c2d61a74e018eca351c123bdb04fe57.cu | /* .cuda.cu - Copyright 2019/2020 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core
{
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CUDAMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float4* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ PathState* pathStates;
__constant__ float4* debugData;
__constant__ mat4 worldToSky;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// staging: copies will be batched and carried out after rendering completes,
// to allow the CPU to update the scene concurrently with GPU rendering.
enum { INSTS = 0, MATS, ALGHTS, PLGHTS, SLGHTS, DLGHTS, LCNTS, RGB32, RGBH, NRMLS, SKYPIX, SKYW, SKYH, SMAT, DBGDAT, GEPS, CLMPV };
// device pointers are not real pointers for nvcc, so we need a bit of a hack.
struct StagedPtr { void* p; int id; };
struct StagedInt { int v; int id; };
struct StagedInt4 { int4 v; int id; };
struct StagedMat { mat4 v; int id; };
struct StagedF32 { float v; int id; };
struct StagedCpy { void* d; void* s; int n; };
static std::vector<StagedPtr> stagedPtr;
static std::vector<StagedInt> stagedInt;
static std::vector<StagedInt4> stagedInt4;
static std::vector<StagedMat> stagedMat;
static std::vector<StagedF32> stagedF32;
static std::vector<StagedCpy> stagedCpy;
__host__ static void pushPtrCpy( int id, void* p )
{
if (id == INSTS) cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) );
if (id == MATS) cudaMemcpyToSymbol( materials, &p, sizeof( void* ) );
if (id == ALGHTS) cudaMemcpyToSymbol( areaLights, &p, sizeof( void* ) );
if (id == PLGHTS) cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) );
if (id == SLGHTS) cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) );
if (id == DLGHTS) cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) );
if (id == RGB32) cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) );
if (id == RGBH) cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) );
if (id == NRMLS) cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) );
if (id == SKYPIX) cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) );
if (id == DBGDAT) cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) );
}
__host__ static void pushIntCpy( int id, const int v )
{
if (id == SKYW) cudaMemcpyToSymbol( skywidth, &v, sizeof( int ) );
if (id == SKYH) cudaMemcpyToSymbol( skyheight, &v, sizeof( int ) );
}
__host__ static void pushF32Cpy( int id, const float v )
{
if (id == GEPS) cudaMemcpyToSymbol( geometryEpsilon, &v, sizeof( float ) );
	if (id == CLMPV) cudaMemcpyToSymbol( clampValue, &v, sizeof( float ) );
}
__host__ static void pushMatCpy( int id, const mat4& m )
{
if (id == SMAT) cudaMemcpyToSymbol( worldToSky, &m, sizeof( mat4 ) );
}
__host__ static void pushInt4Cpy( int id, const int4& v )
{
if (id == LCNTS) cudaMemcpyToSymbol( lightCounts, &v, sizeof( int4 ) );
}
#define MAXVARS 32
static void* prevPtr[MAXVARS] = {};
static int prevInt[MAXVARS] = {};
static float prevFloat[MAXVARS] = {};
static int4 prevInt4[MAXVARS] = {};
static bool prevValSet[MAXVARS] = {};
__host__ static void stagePtrCpy( int id, void* p )
{
if (prevPtr[id] == p) return; // not changed
StagedPtr n = { p, id };
stagedPtr.push_back( n );
prevPtr[id] = p;
}
__host__ static void stageIntCpy( int id, const int v )
{
if (prevValSet[id] == true && prevInt[id] == v) return;
StagedInt n = { v, id };
stagedInt.push_back( n );
prevValSet[id] = true;
prevInt[id] = v;
}
__host__ static void stageF32Cpy( int id, const float v )
{
if (prevValSet[id] == true && prevFloat[id] == v) return;
StagedF32 n = { v, id };
stagedF32.push_back( n );
prevValSet[id] = true;
prevFloat[id] = v;
}
__host__ static void stageMatCpy( int id, const mat4& m ) { StagedMat n = { m, id }; stagedMat.push_back( n ); }
__host__ static void stageInt4Cpy( int id, const int4& v )
{
if (prevValSet[id] == true && prevInt4[id].x == v.x && prevInt4[id].y == v.y && prevInt4[id].z == v.z && prevInt4[id].w == v.w) return;
StagedInt4 n = { v, id };
stagedInt4.push_back( n );
prevValSet[id] = true;
prevInt4[id] = v;
}
__host__ void stageMemcpy( void* d, void* s, int n ) { StagedCpy c = { d, s, n }; stagedCpy.push_back( c ); }
__host__ void stageInstanceDescriptors( CoreInstanceDesc* p ) { stagePtrCpy( INSTS /* instanceDescriptors */, p ); }
__host__ void stageMaterialList( CUDAMaterial* p ) { stagePtrCpy( MATS /* materials */, p ); }
__host__ void stageAreaLights( CoreLightTri* p ) { stagePtrCpy( ALGHTS /* areaLights */, p ); }
__host__ void stagePointLights( CorePointLight* p ) { stagePtrCpy( PLGHTS /* pointLights */, p ); }
__host__ void stageSpotLights( CoreSpotLight* p ) { stagePtrCpy( SLGHTS /* spotLights */, p ); }
__host__ void stageDirectionalLights( CoreDirectionalLight* p ) { stagePtrCpy( DLGHTS /* directionalLights */, p ); }
__host__ void stageARGB32Pixels( uint* p ) { stagePtrCpy( RGB32 /* argb32 */, p ); }
__host__ void stageARGB128Pixels( float4* p ) { stagePtrCpy( RGBH /* argb128 */, p ); }
__host__ void stageNRM32Pixels( uint* p ) { stagePtrCpy( NRMLS /* nrm32 */, p ); }
__host__ void stageSkyPixels( float4* p ) { stagePtrCpy( SKYPIX /* skyPixels */, p ); }
__host__ void stageSkySize( int w, int h ) { stageIntCpy( SKYW /* skywidth */, w ); stageIntCpy( SKYH /* skyheight */, h ); }
__host__ void stageWorldToSky( const mat4& worldToLight ) { stageMatCpy( SMAT /* worldToSky */, worldToLight ); }
__host__ void stageDebugData( float4* p ) { stagePtrCpy( DBGDAT /* debugData */, p ); }
__host__ void stageGeometryEpsilon( float e ) { stageF32Cpy( GEPS /* geometryEpsilon */, e ); }
__host__ void stageClampValue( float c ) { stageF32Cpy( CLMPV /* clampValue */, c ); }
__host__ void stageLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
stageInt4Cpy( LCNTS /* lightCounts */, counts );
}
__host__ void pushStagedCopies()
{
for (auto c : stagedCpy) cudaMemcpy( c.d, c.s, c.n, cudaMemcpyHostToDevice ); stagedCpy.clear();
for (auto n : stagedPtr) pushPtrCpy( n.id, n.p ); stagedPtr.clear();
for (auto n : stagedInt) pushIntCpy( n.id, n.v ); stagedInt.clear();
for (auto n : stagedInt4) pushInt4Cpy( n.id, n.v ); stagedInt4.clear();
for (auto n : stagedF32) pushF32Cpy( n.id, n.v ); stagedF32.clear();
for (auto n : stagedMat) pushMatCpy( n.id, n.v ); stagedMat.clear();
}
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->activePaths = pathCount; // remaining active paths
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->generated = 0; // persistent thread atomic for generate in .optix.cu
counters->extensionRays = 0; // compaction counter for extension rays
counters->shadowRays = 0; // compaction counter for connections
counters->connected = 0;
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersForASStage2_Kernel()
{
if (threadIdx.x != 0) return;
counters->activePaths = 0; // will be filled by primary ray generation code
counters->shadowRays = 0; // compaction counter for connections
counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersForASStage2() { InitCountersForASStage2_Kernel << <1, 32 >> > (); }
__global__ void InitCountersSubsequent_Kernel()
{
if (threadIdx.x != 0) return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // remaining active paths
counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu
counters->shaded = 0; // persistent thread atomic for shade kernel
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "camera.h"
#include "pathtracer.h"
#include "finalize_shared.h"
} // namespace lh2core
// EOF |
497b5ca7ff282408aac2447a56dcb790b9f0ade7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_pooling.h"
#include "saber/funcs/impl/cuda/vender_pooling.h"
#include "saber/funcs/calibrate.h"
#include "saber/core/tensor_op.h"
#include <cfloat>
namespace anakin {
namespace saber {
template <>
SaberStatus SaberPooling<NV, AK_FLOAT>::create(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
_impl->create(inputs, outputs, param, ctx);
return SaberSuccess;
}
template <>
SaberStatus SaberPooling<NV, AK_FLOAT>::init(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
this->_ctx = &ctx;
_impl = new VenderPooling<NV, AK_FLOAT>;
_impl->init(inputs, outputs, param, ctx);
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberPooling<NV, AK_FLOAT>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m) {
_impl->dispatch(inputs, outputs, param);
return SaberSuccess;
}
union Reg{
unsigned int idata;
char b[4];
};
__global__ void pool_s8s8_max_c4(const int nthreads,
const void* const in_data, const int channels,
const int height, const int width, const int out_height,
const int out_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
void* const out_data, float place_holder, float trans_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % out_width;
const int ph = (index / out_width) % out_height;
const int c = (index / out_width / out_height) % channels;
const int n = index / out_width / out_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
unsigned int maxval = 0x80808080; // four packed int8 lanes, each 0x80 = -128 (the int8 minimum): identity value for the per-byte signed max below
const unsigned int* in_slice =
(const unsigned int*)(in_data);
int offset = (n * channels + c) * height * width;
in_slice += offset;
unsigned int *out = (unsigned int*)out_data;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
unsigned int read_in = in_slice[h * width + w];
asm volatile (" vmax4.s32.s32.s32 %0, %1, %2, %0;"
: "=r"(maxval) : "r"(maxval), "r"(read_in));
}
}
out[index] = maxval;
}
}
__global__ void pool_s8s8_avrg_c4(const int nthreads,
const void* const in_data, const int channels,
const int height, const int width, const int out_height,
const int out_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
void* const out_data, float avg_1, float trans_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % out_width;
const int ph = (index / out_width) % out_height;
const int c = (index / out_width / out_height) % channels;
const int n = index / out_width / out_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Reg reg;
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
const unsigned int* in_slice =
(const unsigned int*)(in_data);
int offset = (n * channels + c) * height * width;
in_slice += offset;
unsigned int *out = (unsigned int*)out_data;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
reg.idata = in_slice[h * width + w];
sum0 += reg.b[0];
sum1 += reg.b[1];
sum2 += reg.b[2];
sum3 += reg.b[3];
}
}
float sum0f = (float)sum0 * avg_1;
float sum1f = (float)sum1 * avg_1;
float sum2f = (float)sum2 * avg_1;
float sum3f = (float)sum3 * avg_1;
reg.b[0] = static_cast<char>(sum0f);
reg.b[1] = static_cast<char>(sum1f);
reg.b[2] = static_cast<char>(sum2f);
reg.b[3] = static_cast<char>(sum3f);
// printf("%x\n", reg.idata);
out[index] = reg.idata;
}
}
template <>
SaberStatus SaberPooling<NV, AK_INT8>::create(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
if (inputs[0]->get_dtype() == AK_FLOAT) {
Shape in_shape = inputs[0]->valid_shape();
_int8_input.re_alloc(in_shape, AK_INT8);
_int8_input.set_scale(inputs[0]->get_scale());
_int8_input.set_layout(Layout_NCHW_C4);
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
Shape out_shape = outputs[0]->valid_shape();
_int8_output.re_alloc(out_shape, AK_INT8);
_int8_output.set_scale(outputs[0]->get_scale());
_int8_output.set_layout(Layout_NCHW_C4);
}
return SaberSuccess;
}
template <>
SaberStatus SaberPooling<NV, AK_INT8>::init(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberPooling<NV, AK_INT8>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m) {
CHECK_GE(inputs[0]->get_scale().size(), 1) << "not found scale factor!!!";
CHECK_GE(outputs[0]->get_scale().size(), 1) << "not found scale factor!!!";
CHECK_EQ(inputs[0]->channel() % 4, 0) << "not a multipler of 4";
float in_scale = inputs[0]->get_scale()[0];
float out_scale = outputs[0]->get_scale()[0];
int count = outputs[0]->valid_size() / 4;
int channels = inputs[0]->channel() / 4;
int height = inputs[0]->height();
int width = inputs[0]->width();
int out_height = outputs[0]->height();
int out_width = outputs[0]->width();
int stride_h = param.stride_h;
int stride_w = param.stride_w;
int pad_h = param.pad_h;
int pad_w = param.pad_w;
int window_h = param.window_h;
int window_w = param.window_w;
auto stream = _ctx->get_compute_stream();
const void* in_data = nullptr;
void* out_data = nullptr;
if (inputs[0]->get_dtype() == AK_FLOAT) {
conv_calibrate_fp32_int8_c4(_int8_input, *inputs[0], in_scale, *(this->_ctx));
in_data = _int8_input.data();
} else {
in_data = inputs[0]->data();
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
out_data = _int8_output.mutable_data();
} else {
out_data = outputs[0]->mutable_data();
}
float kernel_size = window_h * window_w;
kernel_size = 1.f / kernel_size;
switch (param.pooling_type) {
case Pooling_max:
pool_s8s8_max_c4 << < CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS,
0, stream >> > (count,
in_data, channels, height, width,
out_height, out_width, window_h, window_w,
stride_h, stride_w, pad_h, pad_w, out_data,
kernel_size, in_scale / out_scale);
break;
case Pooling_average_include_padding:
pool_s8s8_avrg_c4 << < CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS,
0, stream >> > (count,
in_data, channels, height, width,
out_height, out_width, window_h, window_w,
stride_h, stride_w, pad_h, pad_w, out_data,
kernel_size, in_scale / out_scale);
break;
default:
LOG(FATAL) << "not support yet!!!" << param.pooling_type;
break;
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
calibrate_int8_c4_fp32(*outputs[0], _int8_output, out_scale, *_ctx);
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberPooling, PoolingParam, NV, AK_HALF);
}
} | 497b5ca7ff282408aac2447a56dcb790b9f0ade7.cu |
#include "saber/funcs/impl/cuda/saber_pooling.h"
#include "saber/funcs/impl/cuda/vender_pooling.h"
#include "saber/funcs/calibrate.h"
#include "saber/core/tensor_op.h"
#include <cfloat>
namespace anakin {
namespace saber {
template <>
SaberStatus SaberPooling<NV, AK_FLOAT>::create(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
_impl->create(inputs, outputs, param, ctx);
return SaberSuccess;
}
template <>
SaberStatus SaberPooling<NV, AK_FLOAT>::init(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
this->_ctx = &ctx;
_impl = new VenderPooling<NV, AK_FLOAT>;
_impl->init(inputs, outputs, param, ctx);
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberPooling<NV, AK_FLOAT>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m) {
_impl->dispatch(inputs, outputs, param);
return SaberSuccess;
}
union Reg{
unsigned int idata;
char b[4];
};
__global__ void pool_s8s8_max_c4(const int nthreads,
const void* const in_data, const int channels,
const int height, const int width, const int out_height,
const int out_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
void* const out_data, float place_holder, float trans_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % out_width;
const int ph = (index / out_width) % out_height;
const int c = (index / out_width / out_height) % channels;
const int n = index / out_width / out_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
unsigned int maxval = 0x80808080; // four packed int8 lanes, each 0x80 = -128 (the int8 minimum): identity value for the per-byte signed max below
const unsigned int* in_slice =
(const unsigned int*)(in_data);
int offset = (n * channels + c) * height * width;
in_slice += offset;
unsigned int *out = (unsigned int*)out_data;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
unsigned int read_in = in_slice[h * width + w];
asm volatile (" vmax4.s32.s32.s32 %0, %1, %2, %0;"
: "=r"(maxval) : "r"(maxval), "r"(read_in));
}
}
out[index] = maxval;
}
}
__global__ void pool_s8s8_avrg_c4(const int nthreads,
const void* const in_data, const int channels,
const int height, const int width, const int out_height,
const int out_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
void* const out_data, float avg_1, float trans_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % out_width;
const int ph = (index / out_width) % out_height;
const int c = (index / out_width / out_height) % channels;
const int n = index / out_width / out_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Reg reg;
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
const unsigned int* in_slice =
(const unsigned int*)(in_data);
int offset = (n * channels + c) * height * width;
in_slice += offset;
unsigned int *out = (unsigned int*)out_data;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
reg.idata = in_slice[h * width + w];
sum0 += reg.b[0];
sum1 += reg.b[1];
sum2 += reg.b[2];
sum3 += reg.b[3];
}
}
float sum0f = (float)sum0 * avg_1;
float sum1f = (float)sum1 * avg_1;
float sum2f = (float)sum2 * avg_1;
float sum3f = (float)sum3 * avg_1;
reg.b[0] = static_cast<char>(sum0f);
reg.b[1] = static_cast<char>(sum1f);
reg.b[2] = static_cast<char>(sum2f);
reg.b[3] = static_cast<char>(sum3f);
// printf("%x\n", reg.idata);
out[index] = reg.idata;
}
}
template <>
SaberStatus SaberPooling<NV, AK_INT8>::create(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
if (inputs[0]->get_dtype() == AK_FLOAT) {
Shape in_shape = inputs[0]->valid_shape();
_int8_input.re_alloc(in_shape, AK_INT8);
_int8_input.set_scale(inputs[0]->get_scale());
_int8_input.set_layout(Layout_NCHW_C4);
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
Shape out_shape = outputs[0]->valid_shape();
_int8_output.re_alloc(out_shape, AK_INT8);
_int8_output.set_scale(outputs[0]->get_scale());
_int8_output.set_layout(Layout_NCHW_C4);
}
return SaberSuccess;
}
template <>
SaberStatus SaberPooling<NV, AK_INT8>::init(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m, Context<NV> &ctx) {
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberPooling<NV, AK_INT8>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
PoolingParam<NV> ¶m) {
CHECK_GE(inputs[0]->get_scale().size(), 1) << "not found scale factor!!!";
CHECK_GE(outputs[0]->get_scale().size(), 1) << "not found scale factor!!!";
CHECK_EQ(inputs[0]->channel() % 4, 0) << "not a multipler of 4";
float in_scale = inputs[0]->get_scale()[0];
float out_scale = outputs[0]->get_scale()[0];
int count = outputs[0]->valid_size() / 4;
int channels = inputs[0]->channel() / 4;
int height = inputs[0]->height();
int width = inputs[0]->width();
int out_height = outputs[0]->height();
int out_width = outputs[0]->width();
int stride_h = param.stride_h;
int stride_w = param.stride_w;
int pad_h = param.pad_h;
int pad_w = param.pad_w;
int window_h = param.window_h;
int window_w = param.window_w;
auto stream = _ctx->get_compute_stream();
const void* in_data = nullptr;
void* out_data = nullptr;
if (inputs[0]->get_dtype() == AK_FLOAT) {
conv_calibrate_fp32_int8_c4(_int8_input, *inputs[0], in_scale, *(this->_ctx));
in_data = _int8_input.data();
} else {
in_data = inputs[0]->data();
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
out_data = _int8_output.mutable_data();
} else {
out_data = outputs[0]->mutable_data();
}
float kernel_size = window_h * window_w;
kernel_size = 1.f / kernel_size;
switch (param.pooling_type) {
case Pooling_max:
pool_s8s8_max_c4 << < CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS,
0, stream >> > (count,
in_data, channels, height, width,
out_height, out_width, window_h, window_w,
stride_h, stride_w, pad_h, pad_w, out_data,
kernel_size, in_scale / out_scale);
break;
case Pooling_average_include_padding:
pool_s8s8_avrg_c4 << < CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS,
0, stream >> > (count,
in_data, channels, height, width,
out_height, out_width, window_h, window_w,
stride_h, stride_w, pad_h, pad_w, out_data,
kernel_size, in_scale / out_scale);
break;
default:
LOG(FATAL) << "not support yet!!!" << param.pooling_type;
break;
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
calibrate_int8_c4_fp32(*outputs[0], _int8_output, out_scale, *_ctx);
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberPooling, PoolingParam, NV, AK_HALF);
}
} |
b244726625fd463e88fd3901a42295b65b3f5d50.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Md = NULL;
hipMalloc(&Md, XSIZE*YSIZE);
const double *Nd = NULL;
hipMalloc(&Nd, XSIZE*YSIZE);
double *Pd = NULL;
hipMalloc(&Pd, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixMult), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixMult), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixMult), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b244726625fd463e88fd3901a42295b65b3f5d50.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Md = NULL;
cudaMalloc(&Md, XSIZE*YSIZE);
const double *Nd = NULL;
cudaMalloc(&Nd, XSIZE*YSIZE);
double *Pd = NULL;
cudaMalloc(&Pd, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixMult<<<gridBlock,threadBlock>>>(Md,Nd,Pd,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixMult<<<gridBlock,threadBlock>>>(Md,Nd,Pd,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixMult<<<gridBlock,threadBlock>>>(Md,Nd,Pd,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a8dc6723240a964f8b9acc322ba9a742263f11f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define kMaxThreadsPerBlock 1024
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads, int num, int topwidth,
int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
extern __shared__ char patch_data_char[];
Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char);
// First (upper left) position of kernel upper-left corner
// in current center position of neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item*topcount] = total_sum;
} // Aggregate result of different threads
}
}
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\
* bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum;
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) \
* pbottomwidth + (l - s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum;
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num,
int channels, int width, int height, int widthheight, int padding, int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight )
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads(); // TODO: do we really need to sync?
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
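// Note: AssembleForward below reuses CorrelateDataBackward0 from the correlation op.
// The assembled output is computed exactly like the correlation gradient w.r.t. its first input,
// with the affinity map `aff` passed where topdiff would go and the rearranged input2 as bottom1.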
int AssembleForward(
at::Tensor output,
at::Tensor aff,
at::Tensor input2,
at::Tensor rbot2,
int top_channels,
int top_height,
int top_width,
int pad_size,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
hipStream_t stream)
{
const int bnum = input2.size(0);
const int bchannels = input2.size(1);
const int bheight = input2.size(2);
const int bwidth = input2.size(3);
const int bwidthheight = bwidth * bheight;
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input2.type(), "rearrange input2", ([&] {
hipLaunchKernelGGL(( blob_rearrange_kernel2<scalar_t>)
, dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream,
input2.data<scalar_t>(), rbot2.data<scalar_t>(),
bnum, bchannels, bwidth, bheight, bwidthheight, pad_size, bwidthheight);
}));
const int paddedheight = bheight + 2 * pad_size;
const int paddedwidth = bwidth + 2 * pad_size;
const int bottomcount = bchannels * bheight * bwidth;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
for (int n = 0; n < bnum; n++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input2.scalar_type(), "assemble forward", ([&] {
hipLaunchKernelGGL(( CorrelateDataBackward0<scalar_t>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream,
botThreadCount,
bnum, n, top_width, top_height, top_channels,
max_displacement, neighborhood_grid_radius, neighborhood_grid_width, kernel_radius,
stride1, stride2,
bwidth, bheight, paddedwidth, paddedheight, bchannels, bottomcount, pad_size,
output.data<scalar_t>(), rbot2.data<scalar_t>(), aff.data<scalar_t>());
}));
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in mx assemble forward: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
int AssembleBackward(
at::Tensor grad_output,
at::Tensor rgrad_output,
at::Tensor rbot2,
at::Tensor aff,
at::Tensor grad_aff,
at::Tensor grad_input2,
int top_channels,
int top_height,
int top_width,
int pad_size,
int max_displacement,
int kernel_size,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
hipStream_t stream)
{
const int bnum = grad_output.size(0);
const int bchannels = grad_output.size(1);
const int bheight = grad_output.size(2);
const int bwidth = grad_output.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width * top_height * top_channels;
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size) * (bheight + 2 * pad_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.type(), "rearrange grad_output", ([&] {
hipLaunchKernelGGL(( blob_rearrange_kernel2<scalar_t>)
, dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream,
grad_output.data<scalar_t>(), rgrad_output.data<scalar_t>(),
bnum, bchannels, bwidth, bheight, bwidthheight, pad_size, pwidthheight);
}));
const int shared_memory_per_block = (kernel_size * kernel_size) * bchannels;
const int paddedheight = bheight + 2 * pad_size;
const int paddedwidth = bwidth + 2 * pad_size;
const int bottomcount = bchannels * bheight * bwidth;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
int topThreadCount = topcount;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
dim3 totalBlocksCorr(top_width, top_height, bnum);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "assemble backward aff", ([&] {
hipLaunchKernelGGL(( CorrelateData<scalar_t>), dim3(totalBlocksCorr), dim3(threadsPerBlock),
shared_memory_per_block * sizeof(scalar_t), stream,
topThreadCount,
bnum, top_width, top_height, top_channels, topcount,
max_displacement, neighborhood_grid_radius,
neighborhood_grid_width, kernel_radius, kernel_size,
stride1, stride2, paddedwidth, paddedheight, bchannels,
rgrad_output.data<scalar_t>(), rbot2.data<scalar_t>(), grad_aff.data<scalar_t>());
}));
for (int n = 0; n < bnum; n++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
rbot2.scalar_type(), "assemble backward input2", ([&] {
hipLaunchKernelGGL(( CorrelateDataBackward1<scalar_t>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream,
botThreadCount,
bnum, n, top_width, top_height, top_channels,
max_displacement, neighborhood_grid_radius, neighborhood_grid_width, kernel_radius,
stride1, stride2,
bwidth, bheight, paddedwidth, paddedheight, bchannels, bottomcount, pad_size,
rgrad_output.data<scalar_t>(), grad_input2.data<scalar_t>(), aff.data<scalar_t>());
}));
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in mx assemble backward: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
| a8dc6723240a964f8b9acc322ba9a742263f11f7.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define kMaxThreadsPerBlock 1024
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads, int num, int topwidth,
int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
extern __shared__ char patch_data_char[];
Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char);
// First (upper left) position of kernel upper-left corner
// in current center position of neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item*topcount] = total_sum;
} // Aggregate result of different threads
}
}
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\
* bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum;
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) \
* pbottomwidth + (l - s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum;
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num,
int channels, int width, int height, int widthheight, int padding, int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight )
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads(); // TODO: do we really need to sync?
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
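// Note: AssembleForward below reuses CorrelateDataBackward0 from the correlation op.
// The assembled output is computed exactly like the correlation gradient w.r.t. its first input,
// with the affinity map `aff` passed where topdiff would go and the rearranged input2 as bottom1.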
int AssembleForward(
at::Tensor output,
at::Tensor aff,
at::Tensor input2,
at::Tensor rbot2,
int top_channels,
int top_height,
int top_width,
int pad_size,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
cudaStream_t stream)
{
const int bnum = input2.size(0);
const int bchannels = input2.size(1);
const int bheight = input2.size(2);
const int bwidth = input2.size(3);
const int bwidthheight = bwidth * bheight;
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input2.type(), "rearrange input2", ([&] {
blob_rearrange_kernel2<scalar_t>
<<<totalBlocksRearr, threads_per_block, 0, stream>>>
(input2.data<scalar_t>(), rbot2.data<scalar_t>(),
bnum, bchannels, bwidth, bheight, bwidthheight, pad_size, bwidthheight);
}));
const int paddedheight = bheight + 2 * pad_size;
const int paddedwidth = bwidth + 2 * pad_size;
const int bottomcount = bchannels * bheight * bwidth;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
for (int n = 0; n < bnum; n++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input2.scalar_type(), "assemble forward", ([&] {
CorrelateDataBackward0<scalar_t><<<gridSize, kMaxThreadsPerBlock, 0, stream>>>(
botThreadCount,
bnum, n, top_width, top_height, top_channels,
max_displacement, neighborhood_grid_radius, neighborhood_grid_width, kernel_radius,
stride1, stride2,
bwidth, bheight, paddedwidth, paddedheight, bchannels, bottomcount, pad_size,
output.data<scalar_t>(), rbot2.data<scalar_t>(), aff.data<scalar_t>());
}));
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in mx assemble forward: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
int AssembleBackward(
at::Tensor grad_output,
at::Tensor rgrad_output,
at::Tensor rbot2,
at::Tensor aff,
at::Tensor grad_aff,
at::Tensor grad_input2,
int top_channels,
int top_height,
int top_width,
int pad_size,
int max_displacement,
int kernel_size,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
cudaStream_t stream)
{
const int bnum = grad_output.size(0);
const int bchannels = grad_output.size(1);
const int bheight = grad_output.size(2);
const int bwidth = grad_output.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width * top_height * top_channels;
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size) * (bheight + 2 * pad_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.type(), "rearrange grad_output", ([&] {
blob_rearrange_kernel2<scalar_t>
<<<totalBlocksRearr, threads_per_block, 0, stream>>>
(grad_output.data<scalar_t>(), rgrad_output.data<scalar_t>(),
bnum, bchannels, bwidth, bheight, bwidthheight, pad_size, pwidthheight);
}));
const int shared_memory_per_block = (kernel_size * kernel_size) * bchannels;
const int paddedheight = bheight + 2 * pad_size;
const int paddedwidth = bwidth + 2 * pad_size;
const int bottomcount = bchannels * bheight * bwidth;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
int topThreadCount = topcount;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
dim3 totalBlocksCorr(top_width, top_height, bnum);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "assemble backward aff", ([&] {
CorrelateData<scalar_t><<<totalBlocksCorr, threadsPerBlock,
shared_memory_per_block * sizeof(scalar_t), stream>>>(
topThreadCount,
bnum, top_width, top_height, top_channels, topcount,
max_displacement, neighborhood_grid_radius,
neighborhood_grid_width, kernel_radius, kernel_size,
stride1, stride2, paddedwidth, paddedheight, bchannels,
rgrad_output.data<scalar_t>(), rbot2.data<scalar_t>(), grad_aff.data<scalar_t>());
}));
for (int n = 0; n < bnum; n++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
rbot2.scalar_type(), "assemble backward input2", ([&] {
CorrelateDataBackward1<scalar_t><<<gridSize, kMaxThreadsPerBlock, 0, stream>>>(
botThreadCount,
bnum, n, top_width, top_height, top_channels,
max_displacement, neighborhood_grid_radius, neighborhood_grid_width, kernel_radius,
stride1, stride2,
bwidth, bheight, paddedwidth, paddedheight, bchannels, bottomcount, pad_size,
rgrad_output.data<scalar_t>(), grad_input2.data<scalar_t>(), aff.data<scalar_t>());
}));
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in mx assemble backward: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
|
9402f64bf61e6e98feec2af44c6c4d9dce6bc8eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <ctime>
#include <climits>
#include <cmath>
#include "RNG.cuh"
using namespace std;
__global__ void RNGen_Global(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed);
__host__ __device__ void RNGen_HostDev(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed, unsigned int threadNumber);
__host__ void RNGen_Host(unsigned int numberOfBlocks, unsigned int numberOfThreadsPerBlock, unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed);
bool AreSame(unsigned int, unsigned int);
bool AreSame(double, double);
int main(){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
unsigned int numberOfBlocks = 10;
unsigned int numberOfThreadsPerBlock = 512;
unsigned int totalNumberOfThreads = numberOfBlocks * numberOfThreadsPerBlock;
unsigned int totalNumbersToGenerate = 50000000;
unsigned int seed;
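// constrain the seed to [129, UINT_MAX - totalNumberOfThreads]: the upper bound leaves headroom to add
// a per-thread offset (up to totalNumberOfThreads) without unsigned overflow; the lower bound of 129 is
// presumably a requirement of the generator implemented in RNG.cuh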
do
seed = time(NULL);
while(seed < 129 || seed > UINT_MAX - totalNumberOfThreads);
unsigned int numbersToGeneratePerThread = ceil(static_cast<double>(totalNumbersToGenerate) / totalNumberOfThreads);
cout << "Total numbers to generate: " << totalNumbersToGenerate << endl;
cout << "Total number of threads: " << totalNumberOfThreads << endl;
cout << "Total numbers to generate per thread: " << numbersToGeneratePerThread << endl;
// CPU-side results
unsigned int *cpu_unsignedNumbers = new unsigned int[totalNumbersToGenerate];
double *cpu_uniformNumbers = new double[totalNumbersToGenerate];
double *cpu_gaussianNumbers = new double[totalNumbersToGenerate];
double *cpu_bimodalNumbers = new double[totalNumbersToGenerate];
// GPU-side results
unsigned int *gpu_unsignedNumbers = new unsigned int[totalNumbersToGenerate];
double *gpu_uniformNumbers = new double[totalNumbersToGenerate];
double *gpu_gaussianNumbers = new double[totalNumbersToGenerate];
double *gpu_bimodalNumbers = new double[totalNumbersToGenerate];
////////////// HOST-SIDE GENERATOR //////////////
clock_t begin = clock();
RNGen_Host(numberOfBlocks, numberOfThreadsPerBlock, cpu_unsignedNumbers, cpu_uniformNumbers, cpu_gaussianNumbers, cpu_bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed);
clock_t end = clock();
double cpu_time = double(end - begin) / CLOCKS_PER_SEC;
////////////// DEVICE-SIDE GENERATOR //////////////
unsigned int *dev_gpu_unsignedNumbers;
double *dev_gpu_uniformNumbers, *dev_gpu_gaussianNumbers, *dev_gpu_bimodalNumbers;
hipMalloc( (void **)&dev_gpu_unsignedNumbers, totalNumbersToGenerate*sizeof(unsigned int) );
hipMalloc( (void **)&dev_gpu_uniformNumbers, totalNumbersToGenerate*sizeof(double) );
hipMalloc( (void **)&dev_gpu_gaussianNumbers, totalNumbersToGenerate*sizeof(double) );
hipMalloc( (void **)&dev_gpu_bimodalNumbers, totalNumbersToGenerate*sizeof(double) );
hipEventRecord(start);
hipLaunchKernelGGL(( RNGen_Global), dim3(numberOfBlocks),dim3(numberOfThreadsPerBlock), 0, 0, dev_gpu_unsignedNumbers, dev_gpu_uniformNumbers, dev_gpu_gaussianNumbers, dev_gpu_bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed);
hipEventRecord(stop);
hipMemcpy(gpu_unsignedNumbers, dev_gpu_unsignedNumbers, totalNumbersToGenerate*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(gpu_uniformNumbers, dev_gpu_uniformNumbers, totalNumbersToGenerate*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(gpu_gaussianNumbers, dev_gpu_gaussianNumbers, totalNumbersToGenerate*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(gpu_bimodalNumbers, dev_gpu_bimodalNumbers, totalNumbersToGenerate*sizeof(double), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float gpu_time = 0;
hipEventElapsedTime(&gpu_time, start, stop);
hipFree(dev_gpu_unsignedNumbers);
hipFree(dev_gpu_uniformNumbers);
hipFree(dev_gpu_gaussianNumbers);
hipFree(dev_gpu_bimodalNumbers);
cout<<endl<<"############### TIMINGS ################";
cout<<endl<<"GPU: "<<gpu_time<<" ms";
cout<<endl<<"CPU: "<<cpu_time*1000<<" ms";
cout << endl << "############### OUTPUT NUMBERS ################" << endl;
cout << endl << "CPU: " << endl;
cout << "thread\t unsigned\t uniform\t gauss\t bimodal" << endl;
for(int randomNumber=0; randomNumber<5; ++randomNumber)
cout << randomNumber << "\t" << cpu_unsignedNumbers[randomNumber] << "\t" << cpu_uniformNumbers[randomNumber] << "\t" << cpu_gaussianNumbers[randomNumber] << "\t" << cpu_bimodalNumbers[randomNumber] << endl;;
cout << ". . ." << endl;
for(int randomNumber=totalNumbersToGenerate-5; randomNumber<totalNumbersToGenerate; ++randomNumber)
cout << randomNumber << "\t" << cpu_unsignedNumbers[randomNumber] << "\t" << cpu_uniformNumbers[randomNumber] << "\t" << cpu_gaussianNumbers[randomNumber] << "\t" << cpu_bimodalNumbers[randomNumber] << endl;
cout << endl << "GPU: " << endl;
cout << "thread\t unsigned\t uniform\t gauss" << endl;
for(int randomNumber=0; randomNumber<5; ++randomNumber)
cout << randomNumber << "\t" << gpu_unsignedNumbers[randomNumber] << "\t" << gpu_uniformNumbers[randomNumber] << "\t" << gpu_gaussianNumbers[randomNumber] << "\t" << gpu_bimodalNumbers[randomNumber] << endl;
cout << ". . ." << endl;
for(int randomNumber=totalNumbersToGenerate-5; randomNumber<totalNumbersToGenerate; ++randomNumber)
cout << randomNumber << "\t" << gpu_unsignedNumbers[randomNumber] << "\t" << gpu_uniformNumbers[randomNumber] << "\t" << gpu_gaussianNumbers[randomNumber] << "\t" << gpu_bimodalNumbers[randomNumber] << endl;
cout << endl << "############### GPU-CPU COMPARISON ################" << endl << endl;
bool gpuCpuComparison = true;
for(int randomNumber=0; randomNumber<totalNumbersToGenerate; ++randomNumber){
if(!AreSame(gpu_unsignedNumbers[randomNumber], cpu_unsignedNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_unsignedNumbers[randomNumber] << "\t" << cpu_unsignedNumbers[randomNumber] << endl;
}
if(!AreSame(gpu_uniformNumbers[randomNumber], cpu_uniformNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_uniformNumbers[randomNumber] << "\t" << cpu_uniformNumbers[randomNumber] << endl;
}
if(!AreSame(gpu_gaussianNumbers[randomNumber], cpu_gaussianNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_gaussianNumbers[randomNumber] << "\t" << cpu_gaussianNumbers[randomNumber] << endl;
}
if(!AreSame(gpu_bimodalNumbers[randomNumber], cpu_bimodalNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_bimodalNumbers[randomNumber] << "\t" << cpu_bimodalNumbers[randomNumber] << endl;
}
}
if(gpuCpuComparison)
cout << "Test PASSED!" << endl;
else
cout << "Test failed..." << endl;
delete[] cpu_unsignedNumbers;
delete[] cpu_uniformNumbers;
delete[] cpu_gaussianNumbers;
delete[] cpu_bimodalNumbers;
delete[] gpu_unsignedNumbers;
delete[] gpu_uniformNumbers;
delete[] gpu_gaussianNumbers;
delete[] gpu_bimodalNumbers;
return 0;
}
__global__ void RNGen_Global(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed){
unsigned int threadNumber = threadIdx.x + blockDim.x * blockIdx.x;
RNGen_HostDev(unsignedNumbers, uniformNumbers, gaussianNumbers, bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed, threadNumber);
}
__host__ __device__ void RNGen_HostDev(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed, unsigned int threadNumber){
RNG_Tausworthe supportGenerator_(seed+threadNumber);
RNG *supportGenerator = &supportGenerator_;
RNG_CombinedGenerator generator_; // note: "generator_()" would declare a function (most vexing parse), not an object
RNG *generator = &generator_;
generator->SetInternalState(supportGenerator);
unsigned int unsignedNumber;
double gaussian, uniform, bimodal;
for(unsigned int RNGNumber=0; RNGNumber<numbersToGeneratePerThread; ++RNGNumber){
if(numbersToGeneratePerThread*threadNumber+RNGNumber < totalNumbersToGenerate){
unsignedNumber = generator->GetUnsignedInt();
unsignedNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = unsignedNumber;
uniform = generator->GetUniform();
uniformNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = uniform;
gaussian = generator->GetGauss();
gaussianNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = gaussian;
bimodal = generator->GetBimodal();
bimodalNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = bimodal;
}
}
}
__host__ void RNGen_Host(unsigned int numberOfBlocks, unsigned int numberOfThreadsPerBlock, unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed){
for(unsigned int threadNumber=0; threadNumber<numberOfBlocks*numberOfThreadsPerBlock; ++threadNumber)
RNGen_HostDev(unsignedNumbers, uniformNumbers, gaussianNumbers, bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed, threadNumber);
}
bool AreSame(unsigned int a, unsigned int b){
double diff = static_cast<double>(a) - static_cast<double>(b); // take the difference in double to avoid unsigned wrap-around when b > a
double epsilon = 0.0001; // 0.01% relative difference
return (fabs(diff / a) < epsilon);
}
bool AreSame(double a, double b){
double diff = a - b;
double epsilon = 0.0001; // 0.01% difference
return (fabs(diff / a) < epsilon);
}
| 9402f64bf61e6e98feec2af44c6c4d9dce6bc8eb.cu |
#include <iostream>
#include <ctime>
#include <climits>
#include <cmath>
#include "RNG.cuh"
using namespace std;
__global__ void RNGen_Global(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed);
__host__ __device__ void RNGen_HostDev(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed, unsigned int threadNumber);
__host__ void RNGen_Host(unsigned int numberOfBlocks, unsigned int numberOfThreadsPerBlock, unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed);
bool AreSame(unsigned int, unsigned int);
bool AreSame(double, double);
int main(){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
unsigned int numberOfBlocks = 10;
unsigned int numberOfThreadsPerBlock = 512;
unsigned int totalNumberOfThreads = numberOfBlocks * numberOfThreadsPerBlock;
unsigned int totalNumbersToGenerate = 50000000;
unsigned int seed;
do
seed = time(NULL);
while(seed < 129 || seed > UINT_MAX - totalNumberOfThreads);
unsigned int numbersToGeneratePerThread = ceil(static_cast<double>(totalNumbersToGenerate) / totalNumberOfThreads);
cout << "Total numbers to generate: " << totalNumbersToGenerate << endl;
cout << "Total number of threads: " << totalNumberOfThreads << endl;
cout << "Total numbers to generate per thread: " << numbersToGeneratePerThread << endl;
// CPU-side results
unsigned int *cpu_unsignedNumbers = new unsigned int[totalNumbersToGenerate];
double *cpu_uniformNumbers = new double[totalNumbersToGenerate];
double *cpu_gaussianNumbers = new double[totalNumbersToGenerate];
double *cpu_bimodalNumbers = new double[totalNumbersToGenerate];
// GPU-side results
unsigned int *gpu_unsignedNumbers = new unsigned int[totalNumbersToGenerate];
double *gpu_uniformNumbers = new double[totalNumbersToGenerate];
double *gpu_gaussianNumbers = new double[totalNumbersToGenerate];
double *gpu_bimodalNumbers = new double[totalNumbersToGenerate];
////////////// HOST-SIDE GENERATOR //////////////
clock_t begin = clock();
RNGen_Host(numberOfBlocks, numberOfThreadsPerBlock, cpu_unsignedNumbers, cpu_uniformNumbers, cpu_gaussianNumbers, cpu_bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed);
clock_t end = clock();
double cpu_time = double(end - begin) / CLOCKS_PER_SEC;
////////////// DEVICE-SIDE GENERATOR //////////////
unsigned int *dev_gpu_unsignedNumbers;
double *dev_gpu_uniformNumbers, *dev_gpu_gaussianNumbers, *dev_gpu_bimodalNumbers;
cudaMalloc( (void **)&dev_gpu_unsignedNumbers, totalNumbersToGenerate*sizeof(unsigned int) );
cudaMalloc( (void **)&dev_gpu_uniformNumbers, totalNumbersToGenerate*sizeof(double) );
cudaMalloc( (void **)&dev_gpu_gaussianNumbers, totalNumbersToGenerate*sizeof(double) );
cudaMalloc( (void **)&dev_gpu_bimodalNumbers, totalNumbersToGenerate*sizeof(double) );
cudaEventRecord(start);
RNGen_Global<<<numberOfBlocks,numberOfThreadsPerBlock>>>(dev_gpu_unsignedNumbers, dev_gpu_uniformNumbers, dev_gpu_gaussianNumbers, dev_gpu_bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed);
cudaEventRecord(stop);
cudaMemcpy(gpu_unsignedNumbers, dev_gpu_unsignedNumbers, totalNumbersToGenerate*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(gpu_uniformNumbers, dev_gpu_uniformNumbers, totalNumbersToGenerate*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(gpu_gaussianNumbers, dev_gpu_gaussianNumbers, totalNumbersToGenerate*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(gpu_bimodalNumbers, dev_gpu_bimodalNumbers, totalNumbersToGenerate*sizeof(double), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float gpu_time = 0;
cudaEventElapsedTime(&gpu_time, start, stop);
cudaFree(dev_gpu_unsignedNumbers);
cudaFree(dev_gpu_uniformNumbers);
cudaFree(dev_gpu_gaussianNumbers);
cudaFree(dev_gpu_bimodalNumbers);
cout<<endl<<"############### TIMINGS ################";
cout<<endl<<"GPU: "<<gpu_time<<" ms";
cout<<endl<<"CPU: "<<cpu_time*1000<<" ms";
cout << endl << "############### OUTPUT NUMBERS ################" << endl;
cout << endl << "CPU: " << endl;
cout << "thread\t unsigned\t uniform\t gauss\t bimodal" << endl;
for(int randomNumber=0; randomNumber<5; ++randomNumber)
cout << randomNumber << "\t" << cpu_unsignedNumbers[randomNumber] << "\t" << cpu_uniformNumbers[randomNumber] << "\t" << cpu_gaussianNumbers[randomNumber] << "\t" << cpu_bimodalNumbers[randomNumber] << endl;;
cout << ". . ." << endl;
for(int randomNumber=totalNumbersToGenerate-5; randomNumber<totalNumbersToGenerate; ++randomNumber)
cout << randomNumber << "\t" << cpu_unsignedNumbers[randomNumber] << "\t" << cpu_uniformNumbers[randomNumber] << "\t" << cpu_gaussianNumbers[randomNumber] << "\t" << cpu_bimodalNumbers[randomNumber] << endl;
cout << endl << "GPU: " << endl;
cout << "thread\t unsigned\t uniform\t gauss" << endl;
for(int randomNumber=0; randomNumber<5; ++randomNumber)
cout << randomNumber << "\t" << gpu_unsignedNumbers[randomNumber] << "\t" << gpu_uniformNumbers[randomNumber] << "\t" << gpu_gaussianNumbers[randomNumber] << "\t" << gpu_bimodalNumbers[randomNumber] << endl;
cout << ". . ." << endl;
for(int randomNumber=totalNumbersToGenerate-5; randomNumber<totalNumbersToGenerate; ++randomNumber)
cout << randomNumber << "\t" << gpu_unsignedNumbers[randomNumber] << "\t" << gpu_uniformNumbers[randomNumber] << "\t" << gpu_gaussianNumbers[randomNumber] << "\t" << gpu_bimodalNumbers[randomNumber] << endl;
cout << endl << "############### GPU-CPU COMPARISON ################" << endl << endl;
bool gpuCpuComparison = true;
for(int randomNumber=0; randomNumber<totalNumbersToGenerate; ++randomNumber){
if(!AreSame(gpu_unsignedNumbers[randomNumber], cpu_unsignedNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_unsignedNumbers[randomNumber] << "\t" << cpu_unsignedNumbers[randomNumber] << endl;
}
if(!AreSame(gpu_uniformNumbers[randomNumber], cpu_uniformNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_uniformNumbers[randomNumber] << "\t" << cpu_uniformNumbers[randomNumber] << endl;
}
if(!AreSame(gpu_gaussianNumbers[randomNumber], cpu_gaussianNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_gaussianNumbers[randomNumber] << "\t" << cpu_gaussianNumbers[randomNumber] << endl;
}
if(!AreSame(gpu_bimodalNumbers[randomNumber], cpu_bimodalNumbers[randomNumber])){
gpuCpuComparison = false;
cout << "FAILED@step " << randomNumber << ":\t" << gpu_bimodalNumbers[randomNumber] << "\t" << cpu_bimodalNumbers[randomNumber] << endl;
}
}
if(gpuCpuComparison)
cout << "Test PASSED!" << endl;
else
cout << "Test failed..." << endl;
delete[] cpu_unsignedNumbers;
delete[] cpu_uniformNumbers;
delete[] cpu_gaussianNumbers;
delete[] cpu_bimodalNumbers;
delete[] gpu_unsignedNumbers;
delete[] gpu_uniformNumbers;
delete[] gpu_gaussianNumbers;
delete[] gpu_bimodalNumbers;
return 0;
}
__global__ void RNGen_Global(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed){
unsigned int threadNumber = threadIdx.x + blockDim.x * blockIdx.x;
RNGen_HostDev(unsignedNumbers, uniformNumbers, gaussianNumbers, bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed, threadNumber);
}
__host__ __device__ void RNGen_HostDev(unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed, unsigned int threadNumber){
RNG_Tausworthe supportGenerator_(seed+threadNumber);
RNG *supportGenerator = &supportGenerator_;
RNG_CombinedGenerator generator_; // note: "generator_()" would declare a function (most vexing parse), not an object
RNG *generator = &generator_;
generator->SetInternalState(supportGenerator);
unsigned int unsignedNumber;
double gaussian, uniform, bimodal;
for(unsigned int RNGNumber=0; RNGNumber<numbersToGeneratePerThread; ++RNGNumber){
if(numbersToGeneratePerThread*threadNumber+RNGNumber < totalNumbersToGenerate){
unsignedNumber = generator->GetUnsignedInt();
unsignedNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = unsignedNumber;
uniform = generator->GetUniform();
uniformNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = uniform;
gaussian = generator->GetGauss();
gaussianNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = gaussian;
bimodal = generator->GetBimodal();
bimodalNumbers[numbersToGeneratePerThread*threadNumber+RNGNumber] = bimodal;
}
}
}
__host__ void RNGen_Host(unsigned int numberOfBlocks, unsigned int numberOfThreadsPerBlock, unsigned int *unsignedNumbers, double *uniformNumbers, double *gaussianNumbers, double *bimodalNumbers, unsigned int totalNumbersToGenerate, unsigned int numbersToGeneratePerThread, unsigned int seed){
for(unsigned int threadNumber=0; threadNumber<numberOfBlocks*numberOfThreadsPerBlock; ++threadNumber)
RNGen_HostDev(unsignedNumbers, uniformNumbers, gaussianNumbers, bimodalNumbers, totalNumbersToGenerate, numbersToGeneratePerThread, seed, threadNumber);
}
bool AreSame(unsigned int a, unsigned int b){
double diff = static_cast<double>(a) - static_cast<double>(b); // take the difference in double to avoid unsigned wrap-around when b > a
double epsilon = 0.0001; // 0.01% relative difference
return (fabs(diff / a) < epsilon);
}
bool AreSame(double a, double b){
double diff = a - b;
double epsilon = 0.0001; // 0.01% difference
return (fabs(diff / a) < epsilon);
}
|
3039c4f1a8810636e0029eac689fea70ccd62e5a.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
* GPU accelerated motion compensation for MRI
*
* Copyright (c) 2016 Bernhard Kainz, Amir Alansary, Maria Kuklisova-Murgasova,
* Kevin Keraudren, Markus Steinberger
* ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
=========================================================================*/
#include "reconVolume.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/version.h>
#include <thrust/inner_product.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/system_error.h>
texture<float, 3, hipReadModeElementType > reconTex_;
template< typename T >
class divS
{
public:
T operator()(T a, T b)
{
return (b != 0) ? a / b : 0;
}
};
template< typename T >
class divSame
{
public:
T operator()(T a, T b)
{
return (b != 0) ? a / b : a;
}
};
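// equalizeVol: voxel-wise normalisation of the reconstruction, recon[idx] /= volWeights[idx],
// leaving voxels with zero accumulated weight unchanged (same semantics as divSame above).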
template <typename T>
__global__ void equalizeVol(T* recon, T* volWeights, uint3 m_size)
{
const uint3 pos = make_uint3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
if (pos.x >= m_size.x || pos.y >= m_size.y || pos.z >= m_size.z)
return;
unsigned int idx = pos.x + pos.y*m_size.x + pos.z * m_size.x*m_size.y;
T a = recon[idx];
T b = volWeights[idx];
recon[idx] = (b != 0) ? a / b : a;
}
template <typename T>
void ReconVolume<T>::equalize(){
unsigned int N = m_size.x*m_size.y*m_size.z;
dim3 blockSize = dim3(8, 8, 8);
dim3 gridSize = divup(dim3(m_size.x, m_size.y, m_size.z), blockSize);
equalizeVol<T> << <gridSize, blockSize >> > (this->getDataPtr(), getReconstructed_volWeigthsPtr(), m_size);
CHECK_ERROR(ReconVolume<T>::equalize());
//this does not work with CUDA 7.5 -> fallback to classic kernel
// try
// {
// thrust::device_ptr<T> ptr_recons(getDataPtr());
// thrust::device_ptr<T> ptr_count(getReconstructed_volWeigthsPtr());
// thrust::transform(ptr_recons, ptr_recons + N, ptr_count, ptr_recons, divS<T>());
/* }
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Thrust error: " << e.what() << std::endl;
exit(-1);
}*/
//CHECK_ERROR(ReconVolume<T>::equalize());
checkCudaErrors(hipDeviceSynchronize());
}
template <typename T>
void ReconVolume<T>::updateReconTex(int dev)
{
//this uses only float interpolation! careful with double
checkCudaErrors(hipSetDevice(dev));
//works only for float interpolation -- will fail for double!!
// ///////////////////////////////////////////////////////////////////////////////
// // test code to fix memcheck error
// const size_t SIZE_X = this->m_size.x;
// const size_t SIZE_Y = this->m_size.y;
// const size_t SIZE_Z = this->m_size.z;
// const size_t width = sizeof(float) * SIZE_X;
// hipExtent volumeSizeBytes = make_hipExtent(width, SIZE_Y, SIZE_Z);
// // hipPitchedPtr d_volumeMem;
// // checkCudaErrors(hipMalloc3D(&d_volumeMem, volumeSizeBytes));
// // size_t size = d_volumeMem.pitch * SIZE_Y * SIZE_Z;
// hipChannelFormatDesc m_channelDesc = hipCreateChannelDesc<float>();
// hipExtent volumeSize = make_hipExtent(SIZE_X, SIZE_Y, SIZE_Z);
// //initialize the 3d texture "tex" with a 3D array "d_volumeArray"
// hipArray* m_d_reconstructed_array;
// checkCudaErrors( hipMalloc3DArray(&m_d_reconstructed_array, &m_channelDesc, volumeSize) );
// reconTex_.normalized = true; // access with normalized texture coordinates
// reconTex_.filterMode = hipFilterModeLinear; // linear interpolation
// reconTex_.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
// reconTex_.addressMode[1] = hipAddressModeClamp;
// reconTex_.addressMode[2] = hipAddressModeClamp;
// // bind array to 3D texture
// checkCudaErrors(hipBindTextureToArray(reconTex_, m_d_reconstructed_array, m_channelDesc));
// //get the real value for 3D texture "tex"
// float *d_volumeMem;
// float *f_m_d_data = (float*) m_d_data;
// checkCudaErrors(hipMalloc((void**)&d_volumeMem, SIZE_X*SIZE_Y*SIZE_Z*sizeof(float)));
// checkCudaErrors(hipMemcpy(d_volumeMem, f_m_d_data, SIZE_X*SIZE_Y*SIZE_Z*sizeof(float), hipMemcpyHostToDevice));
// //copy d_volumeMem to 3DArray
// hipMemcpy3DParms copyParams = {0};
// copyParams.srcPtr = make_hipPitchedPtr((void*)d_volumeMem, SIZE_X*sizeof(float), SIZE_X, SIZE_Y);
// copyParams.dstArray = m_d_reconstructed_array;
// copyParams.extent = volumeSize;
// copyParams.kind = hipMemcpyDeviceToDevice;
// checkCudaErrors( hipMemcpy3D(©Params) );
// ///////////////////////////////////////////////////////////////////////////////
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)this->m_d_data, this->m_size.x*sizeof(float),this->m_size.x, this->m_size.y);
copyParams.dstArray = m_d_reconstructed_array;//a1;// dev_reconstructed_array[dev];
copyParams.extent = m_asize;
copyParams.kind = hipMemcpyDeviceToDevice;
checkCudaErrors(hipMemcpy3D(©Params));
reconTex_.addressMode[0] = hipAddressModeBorder;
reconTex_.addressMode[1] = hipAddressModeBorder;
reconTex_.addressMode[2] = hipAddressModeBorder;
reconTex_.filterMode = hipFilterModeLinear;
reconTex_.normalized = true;
checkCudaErrors(hipBindTextureToArray(reconTex_, m_d_reconstructed_array, m_channelDesc));
CHECK_ERROR(hipBindTextureToArray);
}
template <typename T>
__device__ T ReconVolume<T>::getReconValueFromTexture(const uint3 & pos) // returns by value: the original "const T &" handed back a dangling reference to a local (this assumes the matching declaration in reconVolume.cuh is changed accordingly)
{
if (pos.x >= m_size.x || pos.y >= m_size.y || pos.z >= m_size.z)
return T(0); // out-of-bounds guard; a bare "return;" is ill-formed in a value-returning function
// float x = (float)pos.x / m_size.x;
// float y = (float)pos.y / m_size.y;
// float z = (float)pos.z / m_size.z;
// float x = float(pos.x)+0.5f;
// float y = float(pos.y)+0.5f;
// float z = float(pos.z)+0.5f;
// unsigned int idx = pos.x + pos.y*m_size.x + pos.z*m_size.x*m_size.y;
T val = (T)tex3D(reconTex_, (T)pos.x / m_size.x, (T)pos.y / m_size.y, (T)pos.z / m_size.z);
return val;
}
template class ReconVolume < float >;
template class ReconVolume < double >;
//template void ReconVolume<float>::equalize();
//template void ReconVolume<double>::equalize(); | 3039c4f1a8810636e0029eac689fea70ccd62e5a.cu | /*=========================================================================
* GPU accelerated motion compensation for MRI
*
* Copyright (c) 2016 Bernhard Kainz, Amir Alansary, Maria Kuklisova-Murgasova,
* Kevin Keraudren, Markus Steinberger
* ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
=========================================================================*/
#include "reconVolume.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/version.h>
#include <thrust/inner_product.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/system_error.h>
texture<float, 3, cudaReadModeElementType > reconTex_;
template< typename T >
class divS
{
public:
T operator()(T a, T b)
{
return (b != 0) ? a / b : 0;
}
};
template< typename T >
class divSame
{
public:
T operator()(T a, T b)
{
return (b != 0) ? a / b : a;
}
};
template <typename T>
__global__ void equalizeVol(T* recon, T* volWeights, uint3 m_size)
{
const uint3 pos = make_uint3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
if (pos.x >= m_size.x || pos.y >= m_size.y || pos.z >= m_size.z)
return;
unsigned int idx = pos.x + pos.y*m_size.x + pos.z * m_size.x*m_size.y;
T a = recon[idx];
T b = volWeights[idx];
recon[idx] = (b != 0) ? a / b : a;
}
template <typename T>
void ReconVolume<T>::equalize(){
unsigned int N = m_size.x*m_size.y*m_size.z;
dim3 blockSize = dim3(8, 8, 8);
dim3 gridSize = divup(dim3(m_size.x, m_size.y, m_size.z), blockSize);
equalizeVol<T> << <gridSize, blockSize >> > (this->getDataPtr(), getReconstructed_volWeigthsPtr(), m_size);
CHECK_ERROR(ReconVolume<T>::equalize());
//this does not work with CUDA 7.5 -> fallback to classic kernel
// try
// {
// thrust::device_ptr<T> ptr_recons(getDataPtr());
// thrust::device_ptr<T> ptr_count(getReconstructed_volWeigthsPtr());
// thrust::transform(ptr_recons, ptr_recons + N, ptr_count, ptr_recons, divS<T>());
/* }
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Thrust error: " << e.what() << std::endl;
exit(-1);
}*/
//CHECK_ERROR(ReconVolume<T>::equalize());
checkCudaErrors(cudaDeviceSynchronize());
}
template <typename T>
void ReconVolume<T>::updateReconTex(int dev)
{
//this uses only float interpolation! careful with double
checkCudaErrors(cudaSetDevice(dev));
//works only for float interpolation -- will fail for double!!
// ///////////////////////////////////////////////////////////////////////////////
// // test code to fix memcheck error
// const size_t SIZE_X = this->m_size.x;
// const size_t SIZE_Y = this->m_size.y;
// const size_t SIZE_Z = this->m_size.z;
// const size_t width = sizeof(float) * SIZE_X;
// cudaExtent volumeSizeBytes = make_cudaExtent(width, SIZE_Y, SIZE_Z);
// // cudaPitchedPtr d_volumeMem;
// // checkCudaErrors(cudaMalloc3D(&d_volumeMem, volumeSizeBytes));
// // size_t size = d_volumeMem.pitch * SIZE_Y * SIZE_Z;
// cudaChannelFormatDesc m_channelDesc = cudaCreateChannelDesc<float>();
// cudaExtent volumeSize = make_cudaExtent(SIZE_X, SIZE_Y, SIZE_Z);
// //initialize the 3d texture "tex" with a 3D array "d_volumeArray"
// cudaArray* m_d_reconstructed_array;
// checkCudaErrors( cudaMalloc3DArray(&m_d_reconstructed_array, &m_channelDesc, volumeSize) );
// reconTex_.normalized = true; // access with normalized texture coordinates
// reconTex_.filterMode = cudaFilterModeLinear; // linear interpolation
// reconTex_.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
// reconTex_.addressMode[1] = cudaAddressModeClamp;
// reconTex_.addressMode[2] = cudaAddressModeClamp;
// // bind array to 3D texture
// checkCudaErrors(cudaBindTextureToArray(reconTex_, m_d_reconstructed_array, m_channelDesc));
// //get the real value for 3D texture "tex"
// float *d_volumeMem;
// float *f_m_d_data = (float*) m_d_data;
// checkCudaErrors(cudaMalloc((void**)&d_volumeMem, SIZE_X*SIZE_Y*SIZE_Z*sizeof(float)));
// checkCudaErrors(cudaMemcpy(d_volumeMem, f_m_d_data, SIZE_X*SIZE_Y*SIZE_Z*sizeof(float), cudaMemcpyHostToDevice));
// //copy d_volumeMem to 3DArray
// cudaMemcpy3DParms copyParams = {0};
// copyParams.srcPtr = make_cudaPitchedPtr((void*)d_volumeMem, SIZE_X*sizeof(float), SIZE_X, SIZE_Y);
// copyParams.dstArray = m_d_reconstructed_array;
// copyParams.extent = volumeSize;
// copyParams.kind = cudaMemcpyDeviceToDevice;
// checkCudaErrors( cudaMemcpy3D(©Params) );
// ///////////////////////////////////////////////////////////////////////////////
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)this->m_d_data, this->m_size.x*sizeof(float),this->m_size.x, this->m_size.y);
copyParams.dstArray = m_d_reconstructed_array;//a1;// dev_reconstructed_array[dev];
copyParams.extent = m_asize;
copyParams.kind = cudaMemcpyDeviceToDevice;
checkCudaErrors(cudaMemcpy3D(©Params));
reconTex_.addressMode[0] = cudaAddressModeBorder;
reconTex_.addressMode[1] = cudaAddressModeBorder;
reconTex_.addressMode[2] = cudaAddressModeBorder;
reconTex_.filterMode = cudaFilterModeLinear;
reconTex_.normalized = true;
checkCudaErrors(cudaBindTextureToArray(reconTex_, m_d_reconstructed_array, m_channelDesc));
CHECK_ERROR(cudaBindTextureToArray);
}
template <typename T>
__device__ T ReconVolume<T>::getReconValueFromTexture(const uint3 & pos) // returns by value: the original "const T &" handed back a dangling reference to a local (this assumes the matching declaration in reconVolume.cuh is changed accordingly)
{
if (pos.x >= m_size.x || pos.y >= m_size.y || pos.z >= m_size.z)
return T(0); // out-of-bounds guard; a bare "return;" is ill-formed in a value-returning function
// float x = (float)pos.x / m_size.x;
// float y = (float)pos.y / m_size.y;
// float z = (float)pos.z / m_size.z;
// float x = float(pos.x)+0.5f;
// float y = float(pos.y)+0.5f;
// float z = float(pos.z)+0.5f;
// unsigned int idx = pos.x + pos.y*m_size.x + pos.z*m_size.x*m_size.y;
T val = (T)tex3D(reconTex_, (T)pos.x / m_size.x, (T)pos.y / m_size.y, (T)pos.z / m_size.z);
return val;
}
template class ReconVolume < float >;
template class ReconVolume < double >;
//template void ReconVolume<float>::equalize();
//template void ReconVolume<double>::equalize(); |
5c327246b38d536ecd240a1e4f203083f492b6a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <math.h>
__global__ void kernelId(int *a)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = idx;
}
__global__ void kernelBlockIdx(int *a)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = blockIdx.x;
}
__global__ void kernelThreadIdx(int *a)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = threadIdx.x;
}
int main(int argc, char **argv)
{
int N = atoi(argv[1]);
for (int k=0; k<3; k++)
{
int *a_h, *b_h; //pointers to host memory
int *a_d; //pointers to device memory
int i;
size_t size = N*sizeof(int);
//allocate array on host
a_h = (int *)malloc(size);
b_h = (int *)malloc(size);
//allocate array on device
hipMalloc((void **) &a_d, size);
//initialization of host data
for (i=0; i<N; i++) a_h[i] = 0;
//copy data from host to device
hipMemcpy(a_d, a_h, sizeof(int)*N, hipMemcpyHostToDevice);
//do calculation on host
int nBlocks = atoi(argv[2]);
int blockSize = atoi(argv[3]);
if (k==0) {hipLaunchKernelGGL(( kernelId), dim3(nBlocks),dim3(blockSize), 0, 0, a_d); printf("%s :", "a[i]"); }
if (k==1) {hipLaunchKernelGGL(( kernelBlockIdx), dim3(nBlocks),dim3(blockSize), 0, 0, a_d); printf("%s :", "blockIdx"); }
if (k==2) {hipLaunchKernelGGL(( kernelThreadIdx), dim3(nBlocks),dim3(blockSize), 0, 0, a_d); printf("%s :", "threadIdx"); }
//retrieve result from device and store in b_h
hipMemcpy(b_h, a_d, sizeof(int)*N, hipMemcpyDeviceToHost);
//print out the result
for (i=0; i<N; i++) printf("%2d ", b_h[i]);
printf("\n");
//cleanup
free(a_h); free(b_h); hipFree(a_d);
}
}
| 5c327246b38d536ecd240a1e4f203083f492b6a3.cu | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <math.h>
__global__ void kernelId(int *a)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = idx;
}
__global__ void kernelBlockIdx(int *a)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = blockIdx.x;
}
__global__ void kernelThreadIdx(int *a)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = threadIdx.x;
}
int main(int argc, char **argv)
{
int N = atoi(argv[1]);
for (int k=0; k<3; k++)
{
int *a_h, *b_h; //pointers to host memory
int *a_d; //pointers to device memory
int i;
size_t size = N*sizeof(int);
//allocate array on host
a_h = (int *)malloc(size);
b_h = (int *)malloc(size);
//allocate array on device
cudaMalloc((void **) &a_d, size);
//initialization of host data
for (i=0; i<N; i++) a_h[i] = 0;
//copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(int)*N, cudaMemcpyHostToDevice);
//do calculation on host
int nBlocks = atoi(argv[2]);
int blockSize = atoi(argv[3]);
if (k==0) { kernelId<<<nBlocks,blockSize>>>(a_d); printf("%s :", "a[i]"); }
if (k==1) { kernelBlockIdx<<<nBlocks,blockSize>>>(a_d); printf("%s :", "blockIdx"); }
if (k==2) { kernelThreadIdx<<<nBlocks,blockSize>>>(a_d); printf("%s :", "threadIdx"); }
//retrieve result from device and store in b_h
cudaMemcpy(b_h, a_d, sizeof(int)*N, cudaMemcpyDeviceToHost);
//print out the result
for (i=0; i<N; i++) printf("%2d ", b_h[i]);
printf("\n");
//cleanup
free(a_h); free(b_h); cudaFree(a_d);
}
}
|
f43fd88f984cb846fd433346d03b05857169c2b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pos_update.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nx = 1;
int ny = 1;
double dt = 1;
double *d_z = NULL;
hipMalloc(&d_z, XSIZE*YSIZE);
double *d_v = NULL;
hipMalloc(&d_v, XSIZE*YSIZE);
double *d_a = NULL;
hipMalloc(&d_a, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(pos_update, dim3(gridBlock), dim3(threadBlock), 0, 0, nx, ny, dt, d_z, d_v, d_a);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(pos_update, dim3(gridBlock), dim3(threadBlock), 0, 0, nx, ny, dt, d_z, d_v, d_a);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(pos_update, dim3(gridBlock), dim3(threadBlock), 0, 0, nx, ny, dt, d_z, d_v, d_a);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f43fd88f984cb846fd433346d03b05857169c2b9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pos_update.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nx = 1;
int ny = 1;
double dt = 1;
double *d_z = NULL;
cudaMalloc(&d_z, XSIZE*YSIZE);
double *d_v = NULL;
cudaMalloc(&d_v, XSIZE*YSIZE);
double *d_a = NULL;
cudaMalloc(&d_a, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pos_update<<<gridBlock,threadBlock>>>(nx,ny,dt,d_z,d_v,d_a);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pos_update<<<gridBlock,threadBlock>>>(nx,ny,dt,d_z,d_v,d_a);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pos_update<<<gridBlock,threadBlock>>>(nx,ny,dt,d_z,d_v,d_a);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3733c41d8c8945262711536502ef5570fc0b624d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
* CUDALERP.cu
* CUDALERP
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Dec 26, 2016
*******************************************************************/
//
// The file CUDALERP.h exposes two extremely high performance GPU
// resize operations,
// CUDALERP (bilinear interpolation), and
// CUDANERP (nearest neighbor interpolation).
//
// CUDALERP offers superior accuracy to CUDA's built-in texture
// interpolator at comparable performance. The accuracy if compiled
// with -use-fast-math off is nearly equivalent to my CPU interpolator,
// KLERP, while still being as fast as the built-in interpolation.
//
// Particularly for large images, CUDALERP dramatically outperforms
// even the highly tuned CPU AVX2 versions.
//
// All functionality is contained in the header 'CUDALERP.h' and
// the source file 'CUDALERP.cu' and has no external dependencies at all.
//
// Note that these are intended for computer vision use (hence the speed)
// and are designed for grayscale images.
//
// The file 'main.cpp' is an example and speed test driver.
//
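// A minimal call sketch (not from the original project; the texture setup below is an
// illustrative assumption using the standard HIP texture-object API, and 'h_img',
// 'oldw', 'oldh', 'neww', 'newh' are placeholder names):
//
//   hipChannelFormatDesc desc = hipCreateChannelDesc<uint8_t>();
//   hipArray_t d_img_arr;
//   hipMallocArray(&d_img_arr, &desc, oldw, oldh);
//   hipMemcpy2DToArray(d_img_arr, 0, 0, h_img, oldw, oldw, oldh, hipMemcpyHostToDevice);
//   // build a hipResourceDesc over d_img_arr and a hipTextureDesc with normalized
//   // coordinates and hipReadModeNormalizedFloat, then hipCreateTextureObject(&tex, ...)
//   uint8_t* d_out;
//   hipMalloc(&d_out, size_t(neww) * newh);
//   CUDALERP(tex, oldw, oldh, d_out, neww, newh);
//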
#include "CUDALERP.h"
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDANERP_kernel(const hipTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = y*gys;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = x*gxs;
float res = 255.0f*tex2D<float>(d_img_tex, fx, fy);
if (x < neww) d_out[y*neww + x] = res;
}
}
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDALERP_kernel(const hipTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = (y + 0.5f)*gys - 0.5f;
const float wt_y = fy - floor(fy);
const float invwt_y = 1.0f - wt_y;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = (x + 0.5f)*gxs - 0.5f;
// less accurate and not really much (or any) faster
// -----------------
// const float res = tex2D<float>(d_img_tex, fx, fy);
// -----------------
const float4 f = tex2Dgather<float4>(d_img_tex, fx + 0.5f, fy + 0.5f);
const float wt_x = fx - floor(fx);
const float invwt_x = 1.0f - wt_x;
const float xa = invwt_x*f.w + wt_x*f.z;
const float xb = invwt_x*f.x + wt_x*f.y;
const float res = 255.0f*(invwt_y*xa + wt_y*xb) + 0.5f;
// -----------------
if (x < neww) d_out[y*neww + x] = res;
}
}
void CUDANERP(const hipTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
hipLaunchKernelGGL(CUDANERP_kernel, dim3(((neww - 1) >> 9) + 1, newh), dim3(256), 0, 0, d_img_tex, gxs, gys, d_out, neww);
hipDeviceSynchronize();
}
void CUDALERP(const hipTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
hipLaunchKernelGGL(CUDALERP_kernel, dim3(((neww - 1) >> 9) + 1, newh), dim3(256), 0, 0, d_img_tex, gxs, gys, d_out, neww);
hipDeviceSynchronize();
}
| 3733c41d8c8945262711536502ef5570fc0b624d.cu | /*******************************************************************
* CUDALERP.cu
* CUDALERP
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Dec 26, 2016
*******************************************************************/
//
// The file CUDALERP.h exposes two extremely high performance GPU
// resize operations,
// CUDALERP (bilinear interpolation), and
// CUDANERP (nearest neighbor interpolation).
//
// CUDALERP offers superior accuracy to CUDA's built-in texture
// interpolator at comparable performance. The accuracy if compiled
// with -use-fast-math off is nearly equivalent to my CPU interpolator,
// KLERP, while still being as fast as the built-in interpolation.
//
// Particularly for large images, CUDALERP dramatically outperforms
// even the highly tuned CPU AVX2 versions.
//
// All functionality is contained in the header 'CUDALERP.h' and
// the source file 'CUDALERP.cu' and has no external dependencies at all.
//
// Note that these are intended for computer vision use (hence the speed)
// and are designed for grayscale images.
//
// The file 'main.cpp' is an example and speed test driver.
//
#include "CUDALERP.h"
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDANERP_kernel(const cudaTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = y*gys;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = x*gxs;
float res = 255.0f*tex2D<float>(d_img_tex, fx, fy);
if (x < neww) d_out[y*neww + x] = res;
}
}
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDALERP_kernel(const cudaTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = (y + 0.5f)*gys - 0.5f;
const float wt_y = fy - floor(fy);
const float invwt_y = 1.0f - wt_y;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = (x + 0.5f)*gxs - 0.5f;
// less accurate and not really much (or any) faster
// -----------------
// const float res = tex2D<float>(d_img_tex, fx, fy);
// -----------------
const float4 f = tex2Dgather<float4>(d_img_tex, fx + 0.5f, fy + 0.5f);
const float wt_x = fx - floor(fx);
const float invwt_x = 1.0f - wt_x;
const float xa = invwt_x*f.w + wt_x*f.z;
const float xb = invwt_x*f.x + wt_x*f.y;
const float res = 255.0f*(invwt_y*xa + wt_y*xb) + 0.5f;
// -----------------
if (x < neww) d_out[y*neww + x] = res;
}
}
void CUDANERP(const cudaTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
CUDANERP_kernel<<<{((neww - 1) >> 9) + 1, newh}, 256>>>(d_img_tex, gxs, gys, d_out, neww);
cudaDeviceSynchronize();
}
void CUDALERP(const cudaTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
CUDALERP_kernel<<<{((neww - 1) >> 9) + 1, newh}, 256>>>(d_img_tex, gxs, gys, d_out, neww);
cudaDeviceSynchronize();
}
|
3b59ffda74cdea762bc0b92a37b10dea7e1505d0.hip | // !!! This is a file automatically generated by hipify!!!
// Vector addition: C = A * B
// using multiple GPUs with OpenMP
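// Structure: each OpenMP thread drives one GPU over an N/NGPU-element slice of A and B;
// VecDot reduces each block's products into one partial sum per block (written to C),
// the per-GPU partial sums are copied back to h_C, and the host accumulates them into
// h_G, which is then compared against the CPU dot product h_D.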
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> // header for OpenMP
#include <hip/hip_runtime.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
float h_G=0.0;
// Functions
void RandomInit(float*, int);
// Device code
__global__ void VecDot(const float* A, const float* B, float* C, int N)
{
extern __shared__ float cache[];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float temp=0.0;
while (i < N){
temp += A[i]*B[i];
i += blockDim.x * gridDim.x; // grid-stride step; without it this loop never terminates
}
cache[cacheIndex] = temp;
__syncthreads(); // all partial sums must be in shared memory before the reduction below
int ib = blockDim.x/2;
while (ib != 0) {
if(cacheIndex < ib)
cache[cacheIndex] += cache[cacheIndex + ib];
__syncthreads();
ib /=2;
}
if(cacheIndex == 0)
C[blockIdx.x] = cache[0];
}
// Host code
int main(void)
{
printf("\n");
printf("Vector Addition with multiple GPUs \n");
int N, NGPU, cpu_thread_id=0;
int *Dev;
long mem = 1024*1024*1024; // 4 Giga for float data type.
printf("Enter the number of GPUs: ");
scanf("%d", &NGPU);
printf("%d\n", NGPU);
Dev = (int *)malloc(sizeof(int)*NGPU);
int numDev = 0;
printf("GPU device number: ");
for(int i = 0; i < NGPU; i++) {
scanf("%d", &Dev[i]);
printf("%d ",Dev[i]);
numDev++;
if(getchar() == '\n') break;
}
printf("\n");
if(numDev != NGPU) {
fprintf(stderr,"Should input %d GPU device numbers\n", NGPU);
exit(1);
}
printf("Enter the size of the vectors: ");
scanf("%d", &N);
printf("%d\n", N);
if (3*N > mem) {
printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
exit(1);
}
long size = N*sizeof(float);
// Set the sizes of threads and blocks
int threadsPerBlock;
printf("Enter the number of threads per block: ");
scanf("%d", &threadsPerBlock);
printf("%d\n", threadsPerBlock);
if(threadsPerBlock > 1024) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(1);
}
int blocksPerGrid = (N + threadsPerBlock*NGPU - 1) / (threadsPerBlock*NGPU);
printf("The number of blocks is %d\n", blocksPerGrid);
if(blocksPerGrid > 2147483647) {
printf("The number of blocks must be less than 2147483647 ! \n");
exit(1);
}
int sb = NGPU*blocksPerGrid * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(sb);
if (! h_A || ! h_B || ! h_C) {
printf("!!! Not enough memory.\n");
exit(1);
}
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// create timer
hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
float Intime,gputime,Outime;
omp_set_num_threads(NGPU);
#pragma omp parallel private(cpu_thread_id)
{
float *d_A, *d_B, *d_C;
cpu_thread_id = omp_get_thread_num();
hipSetDevice(Dev[cpu_thread_id]);
// hipSetDevice(cpu_thread_id);
// start the timer
if(cpu_thread_id == 0) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
}
// Allocate vectors in device memory
hipMalloc((void**)&d_A, size/NGPU);
hipMalloc((void**)&d_B, size/NGPU);
hipMalloc((void**)&d_C, sb/NGPU);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice);
#pragma omp barrier
// stop the timer
if(cpu_thread_id == 0) {
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &Intime, start, stop);
printf("Data input time for GPU: %f (ms) \n",Intime);
}
// start the timer
if(cpu_thread_id == 0) hipEventRecord(start,0);
int sm = threadsPerBlock*sizeof(float);
hipLaunchKernelGGL(( VecDot), dim3(blocksPerGrid), dim3(threadsPerBlock), sm, 0, d_A, d_B, d_C, N/NGPU);
// stop the timer
if(cpu_thread_id == 0) {
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
printf("GPU Gflops: %f\n",(2*N-1)/(1000000.0*gputime));
}
// Copy result from device memory to host memory
// h_C contains the result in host memory
// start the timer
if(cpu_thread_id == 0) hipEventRecord(start,0);
hipMemcpy(h_C+blocksPerGrid*cpu_thread_id, d_C, sb/NGPU, hipMemcpyDeviceToHost); // each GPU returns blocksPerGrid partial sums; an N/NGPU offset would write past the end of h_C
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// stop the timer
if(cpu_thread_id == 0) {
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &Outime, start, stop);
printf("Data output time for GPU: %f (ms) \n",Outime);
}
}
for(int i = 0; i < blocksPerGrid*NGPU; i++)
h_G += h_C[i];
float gputime_tot;
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
// start the timer
hipEventRecord(start,0);
float h_D =0.0; // compute the reference solution
for (int i = 0; i < N; ++i)
h_D += h_A[i]*h_B[i];
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float cputime;
hipEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("CPU Gflops: %f\n",(2*N-1)/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/gputime_tot);
// Destroy timer
hipEventDestroy(start);
hipEventDestroy(stop);
// check result
printf("Check result:\n");
double diff = abs( (h_D - h_G)/h_D );
printf("|(h_G - h_D)/h_D|=%20.15e\n",diff);
printf("h_G =%20.15e\n",h_G);
printf("h_D =%20.15e\n",h_D);
for (int i=0; i < NGPU; i++) {
hipSetDevice(i);
hipDeviceReset();
}
return 0;
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
| 3b59ffda74cdea762bc0b92a37b10dea7e1505d0.cu | // Vector addition: C = A * B
// using multiple GPUs with OpenMP
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> // header for OpenMP
#include <cuda_runtime.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
float h_G=0.0;
// Functions
void RandomInit(float*, int);
// Device code
__global__ void VecDot(const float* A, const float* B, float* C, int N)
{
extern __shared__ float cache[];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float temp=0.0;
while (i < N){
temp += A[i]*B[i];
i += blockDim.x * gridDim.x; // grid-stride step; without it this loop never terminates
}
cache[cacheIndex] = temp;
__syncthreads(); // all partial sums must be in shared memory before the reduction below
int ib = blockDim.x/2;
while (ib != 0) {
if(cacheIndex < ib)
cache[cacheIndex] += cache[cacheIndex + ib];
__syncthreads();
ib /=2;
}
if(cacheIndex == 0)
C[blockIdx.x] = cache[0];
}
// Host code
int main(void)
{
printf("\n");
printf("Vector Addition with multiple GPUs \n");
int N, NGPU, cpu_thread_id=0;
int *Dev;
long mem = 1024*1024*1024; // 4 Giga for float data type.
printf("Enter the number of GPUs: ");
scanf("%d", &NGPU);
printf("%d\n", NGPU);
Dev = (int *)malloc(sizeof(int)*NGPU);
int numDev = 0;
printf("GPU device number: ");
for(int i = 0; i < NGPU; i++) {
scanf("%d", &Dev[i]);
printf("%d ",Dev[i]);
numDev++;
if(getchar() == '\n') break;
}
printf("\n");
if(numDev != NGPU) {
fprintf(stderr,"Should input %d GPU device numbers\n", NGPU);
exit(1);
}
printf("Enter the size of the vectors: ");
scanf("%d", &N);
printf("%d\n", N);
if (3*N > mem) {
printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
exit(1);
}
long size = N*sizeof(float);
// Set the sizes of threads and blocks
int threadsPerBlock;
printf("Enter the number of threads per block: ");
scanf("%d", &threadsPerBlock);
printf("%d\n", threadsPerBlock);
if(threadsPerBlock > 1024) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(1);
}
int blocksPerGrid = (N + threadsPerBlock*NGPU - 1) / (threadsPerBlock*NGPU);
printf("The number of blocks is %d\n", blocksPerGrid);
if(blocksPerGrid > 2147483647) {
printf("The number of blocks must be less than 2147483647 ! \n");
exit(1);
}
int sb = NGPU*blocksPerGrid * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(sb);
if (! h_A || ! h_B || ! h_C) {
printf("!!! Not enough memory.\n");
exit(1);
}
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// create timer
cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
float Intime,gputime,Outime;
omp_set_num_threads(NGPU);
#pragma omp parallel private(cpu_thread_id)
{
float *d_A, *d_B, *d_C;
cpu_thread_id = omp_get_thread_num();
cudaSetDevice(Dev[cpu_thread_id]);
// cudaSetDevice(cpu_thread_id);
// start the timer
if(cpu_thread_id == 0) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
}
// Allocate vectors in device memory
cudaMalloc((void**)&d_A, size/NGPU);
cudaMalloc((void**)&d_B, size/NGPU);
cudaMalloc((void**)&d_C, sb/NGPU);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice);
#pragma omp barrier
// stop the timer
if(cpu_thread_id == 0) {
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &Intime, start, stop);
printf("Data input time for GPU: %f (ms) \n",Intime);
}
// start the timer
if(cpu_thread_id == 0) cudaEventRecord(start,0);
int sm = threadsPerBlock*sizeof(float);
VecDot<<<blocksPerGrid, threadsPerBlock, sm>>>(d_A, d_B, d_C, N/NGPU);
// stop the timer
if(cpu_thread_id == 0) {
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
printf("GPU Gflops: %f\n",(2*N-1)/(1000000.0*gputime));
}
// Copy result from device memory to host memory
// h_C contains the result in host memory
// start the timer
if(cpu_thread_id == 0) cudaEventRecord(start,0);
cudaMemcpy(h_C+blocksPerGrid*cpu_thread_id, d_C, sb/NGPU, cudaMemcpyDeviceToHost); // each GPU returns blocksPerGrid partial sums; an N/NGPU offset would write past the end of h_C
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// stop the timer
if(cpu_thread_id == 0) {
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &Outime, start, stop);
printf("Data output time for GPU: %f (ms) \n",Outime);
}
}
for(int i = 0; i < blocksPerGrid*NGPU; i++)
h_G += h_C[i];
float gputime_tot;
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
// start the timer
cudaEventRecord(start,0);
float h_D =0.0; // compute the reference solution
for (int i = 0; i < N; ++i)
h_D += h_A[i]*h_B[i];
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float cputime;
cudaEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("CPU Gflops: %f\n",(2*N-1)/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/gputime_tot);
// Destroy timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
// check result
printf("Check result:\n");
double diff = abs( (h_D - h_G)/h_D );
printf("|(h_G - h_D)/h_D|=%20.15e\n",diff);
printf("h_G =%20.15e\n",h_G);
printf("h_D =%20.15e\n",h_D);
for (int i=0; i < NGPU; i++) {
cudaSetDevice(i);
cudaDeviceReset();
}
return 0;
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
|
78657798dd18fd07c108f7c1f3e0e67d5cbd9e90.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_updater_cuda.h"
#include "neural_network_cuda_exception.h"
#include "layer_testing_schema_factory.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "layer_updater_schema_factory.h"
#include "weight_vector_bound_cuda_factory.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include <stack>
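// Converts byte-packed input (four values per uchar4) to float4, scaling from [0,255] to [0,1].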
__global__ void convert_compacted_to_raw_upd_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
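// For each updater entry and output neuron: error = desired - actual, and 0.5 * error^2 is accumulated into the MSE buffer.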
__global__ void compute_error_upd_kernel(
float * __restrict errors,
float * __restrict mse,
const float * __restrict desired_output_neurons,
const float * __restrict actual_output_neurons,
int output_entry_id,
int output_elem_count,
int updater_entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int updater_entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (elem_id < output_elem_count) && (updater_entry_id < updater_entry_count);
if (in_bounds)
{
int offset = updater_entry_id * output_elem_count + elem_id;
float err = desired_output_neurons[output_entry_id * output_elem_count + elem_id] - actual_output_neurons[offset];
errors[offset] = err;
mse[offset] += err * err * 0.5F;
}
}
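// Applies dropout: zeroes a neuron when the corresponding value in the precomputed random buffer (indexed with a masked offset) is below the dropout rate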
__global__ void dropout_kernel(
float * __restrict neurons,
const float * __restrict random_buf,
float dropout_rate,
int offset,
unsigned int mask,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
unsigned int random_elem_id = (elem_id + offset) & mask;
if (random_buf[random_elem_id] < dropout_rate)
neurons[elem_id] = 0.0F;
}
}
namespace nnforge
{
namespace cuda
{
unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024;
network_updater_cuda::network_updater_cuda(
network_schema_smart_ptr schema,
const std::map<unsigned int, float>& layer_to_dropout_rate_map,
const std::map<unsigned int, weight_vector_bound>& layer_to_weight_vector_bound_map,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_updater(schema, layer_to_dropout_rate_map, layer_to_weight_vector_bound_map)
, cuda_config(cuda_config)
{
const const_layer_list& layer_list = *schema;
testing_layer_count = 0;
start_layer_nonempty_weights_iterator = layer_list.begin();
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
{
start_layer_nonempty_weights_iterator = it;
if (!(*it)->is_empty_data())
break;
testing_layer_count++;
}
for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it)
testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config));
for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it)
updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config));
for(std::map<unsigned int, weight_vector_bound>::const_iterator it = this->layer_to_weight_vector_bound_map.begin(); it != this->layer_to_weight_vector_bound_map.end(); ++it)
{
unsigned int layer_id = it->first;
if (layer_id < testing_layer_count)
throw neural_network_exception((boost::format("Weight vector bound is specified for layer %1% while it is in testing part (consisting of %2% layers) of the updater") % layer_id % testing_layer_count).str());
weight_vector_bounds.insert(std::make_pair(layer_id, single_weight_vector_bound_factory::get_const_instance().create_weight_vector_bound(layer_list[layer_id], cuda_config)));
}
setup_network_cuda();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it)
testing_schema_data.push_back((*it)->get_schema_buffers());
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it)
updater_schema_data.push_back((*it)->get_schema_buffers());
}
network_updater_cuda::~network_updater_cuda()
{
}
void network_updater_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
data_stream = cuda_stream_smart_ptr(new cuda_stream());
}
std::vector<testing_result_smart_ptr> network_updater_cuda::actual_update(
supervised_data_reader& reader,
const std::vector<network_data_smart_ptr>& training_speed_vector_list,
std::vector<network_data_smart_ptr>& data_list)
{
std::vector<testing_result_smart_ptr> res;
entry_count_updated_in_profile_mode = 0;
reader.reset();
layer_configuration_specific input_configuration = reader.get_input_configuration();
layer_configuration_specific output_configuration = reader.get_output_configuration();
unsigned int input_neuron_count = input_configuration.get_neuron_count();
unsigned int output_neuron_count = output_configuration.get_neuron_count();
unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map();
neuron_data_type::input_type type_code = reader.get_input_type();
size_t input_neuron_elem_size = reader.get_input_neuron_elem_size();
unsigned int updater_entry_count = static_cast<unsigned int>(data_list.size());
if (updater_entry_count == 0)
return res;
for(unsigned int i = 0; i < training_speed_vector_list.size(); ++i)
res.push_back(testing_result_smart_ptr(new testing_result(output_neuron_count)));
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = enqueue_get_data(data_list, *command_stream);
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > training_speed_data = enqueue_get_training_speed(training_speed_vector_list, *command_stream);
buffer_cuda_size_configuration buffers_config;
update_buffers_configuration(buffers_config, updater_entry_count);
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error
buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // mse
if (!random_uniform_list.empty())
buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list
for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it)
for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffers_config.add_constant_buffer((*it2)->get_size());
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = training_speed_data.begin(); it != training_speed_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffers_config.add_constant_buffer((*it2)->get_size());
unsigned int max_entry_count = std::min<unsigned int>(std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch);
cuda_linear_buffer_device_smart_ptr input_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
};
cuda_linear_buffer_device_smart_ptr output_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
};
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr mse_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr random_uniform_buf;
if (!random_uniform_list.empty())
{
random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float)));
cuda_safe_call(hipMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), hipMemcpyHostToDevice, *command_stream));
}
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack;
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count);
testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
}
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it)
{
layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count);
updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers));
output_buffer = all_buffers.output_neurons_buffer;
}
std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers;
cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf;
for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it)
{
output_errors_buffers.push_back(output_errors);
layer_updater_cuda::buffer_set& all_buffers = it->second;
if (all_buffers.input_errors_buffer != 0)
output_errors = all_buffers.input_errors_buffer;
}
std::map<unsigned int, std::vector<cuda_linear_buffer_device_smart_ptr> > weight_vector_bound_buffers;
for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it)
weight_vector_bound_buffers.insert(std::make_pair(it->first, it->second->allocate_additional_buffers(max_entry_count)));
cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size));
unsigned char * input = *input_host_buf;
cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float)));
float * output = *output_host_buf;
// zero mse
cuda_util::set_with_value(
*cuda_config,
*mse_buf,
0.0F,
output_neuron_count * updater_entry_count,
*command_stream);
unsigned int current_data_slot = 0;
unsigned int current_command_slot = 1;
unsigned int entries_available_for_copy_in_count = reader.get_entry_count();
unsigned int entries_available_for_processing_count = 0;
cuda_event data_processed_event;
cuda_event input_copied_event;
if (cuda_config->is_flush_required())
{
cuda_safe_call(hipEventRecord(data_processed_event, *command_stream));
cuda_safe_call(hipEventQuery(data_processed_event));
}
random_generator gen = rnd::get_random_generator();
std::tr1::uniform_int<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1));
unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1);
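// Double-buffered processing loop: the command stream works on the batch already in the current command slot while the data stream copies the next batch into the current data slot; the slots are swapped after both streams synchronize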
while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0))
{
if (entries_available_for_processing_count > 0)
{
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( convert_compacted_to_raw_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*input_buf[current_command_slot],
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(hipMemcpyAsync(
*input_converted_buf,
*input_buf[current_command_slot],
input_neuron_count * entries_available_for_processing_count * sizeof(float),
hipMemcpyDeviceToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str());
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin();
unsigned int layer_id = 0;
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin();
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it)
{
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = dist(gen);
enqueue_dropout(
*command_stream,
random_uniform_buf,
input_and_additional_buffers_pack_it->first,
dropout_it->second,
mask,
entries_available_for_processing_count * layer_config_it->get_neuron_count(),
offset);
}
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
std::vector<const_cuda_linear_buffer_device_smart_ptr>(),
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
entries_available_for_processing_count);
}
}
// Apply dropout to the input of the first updater layer
{
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = dist(gen);
enqueue_dropout(
*command_stream,
random_uniform_buf,
updater_input_and_all_buffers_pack[0].first,
dropout_it->second,
mask,
entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(),
offset);
}
}
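// Process one input entry at a time: forward pass through the updater layers, error computation against the desired output, then backpropagation and weight updates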
for(unsigned int input_entry_id = 0; input_entry_id < entries_available_for_processing_count; ++input_entry_id)
{
std::stack<unsigned int> offset_list;
// Forward updater
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin();
unsigned int layer_id = testing_layer_count;
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_id, ++layer_config_it)
{
if (it != updater_list.begin())
{
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = dist(gen);
offset_list.push(offset);
enqueue_dropout(
*command_stream,
random_uniform_buf,
input_and_all_buffers_pack_it->first,
dropout_it->second,
mask,
updater_entry_count * layer_config_it->get_neuron_count(),
offset);
}
}
(*it)->enqueue_test(
it == updater_list.begin() ? input_entry_id : 0,
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
updater_entry_count);
}
}
// Compute errors
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_neuron_count,
updater_entry_count,
1);
hipLaunchKernelGGL(( compute_error_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*initial_error_buf,
*mse_buf,
*output_buf[current_command_slot],
*output_buffer,
input_entry_id,
output_neuron_count,
updater_entry_count);
}
// Backward updater
{
std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin();
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator training_speed_data_it = training_speed_data.rbegin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin();
unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1;
layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1;
for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++training_speed_data_it, ++output_errors_it, ++net_data_it, --reverse_layer_id, ++layer_config_it)
{
if (it != (updater_list.rend() - 1))
{
(*it)->enqueue_backprop(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->first,
*output_errors_it,
input_and_all_buffers_pack_it->second.input_errors_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
updater_entry_count);
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = offset_list.top();
offset_list.pop();
enqueue_dropout(
*command_stream,
random_uniform_buf,
(input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer,
dropout_it->second,
mask,
updater_entry_count * layer_config_it->get_neuron_count(),
offset);
}
}
(*it)->enqueue_update_weights(
(it == (updater_list.rend() - 1)) ? input_entry_id : 0,
*command_stream,
*net_data_it,
*schema_data_it,
*training_speed_data_it,
*output_errors_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
updater_entry_count);
weight_vector_bound_map::iterator bound_it = weight_vector_bounds.find(reverse_layer_id);
if (bound_it != weight_vector_bounds.end())
{
const weight_vector_bound& bound = layer_to_weight_vector_bound_map.find(reverse_layer_id)->second;
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers = weight_vector_bound_buffers.find(reverse_layer_id)->second;
bound_it->second->enqueue_normalize_weights(
*command_stream,
bound,
*net_data_it,
additional_buffers,
updater_entry_count);
}
}
}
if (((input_entry_id % 16) == 1) && cuda_config->is_flush_required())
{
cuda_safe_call(hipEventRecord(data_processed_event, *command_stream));
cuda_safe_call(hipEventQuery(data_processed_event));
}
} // for(unsigned int input_entry_id
if (profile_mode)
entry_count_updated_in_profile_mode += entries_available_for_processing_count;
for(std::vector<testing_result_smart_ptr>::iterator it = res.begin(); it != res.end(); ++it)
(*it)->entry_count += entries_available_for_processing_count;
if (cuda_config->is_flush_required())
{
cuda_safe_call(hipEventRecord(data_processed_event, *command_stream));
cuda_safe_call(hipEventQuery(data_processed_event));
}
} // if (entries_available_for_processing_count > 0)
unsigned int entries_read_count = 0;
if (entries_available_for_copy_in_count > 0)
{
unsigned int entries_to_read_count = std::min<unsigned int>(max_entry_count, entries_available_for_copy_in_count);
while(entries_read_count < entries_to_read_count)
{
bool entry_read = reader.read(
input + (input_neuron_count * entries_read_count * input_neuron_elem_size),
output + (output_neuron_count * entries_read_count));
if (!entry_read)
break;
entries_read_count++;
}
cuda_safe_call(hipMemcpyAsync(
*(input_buf[current_data_slot]),
input,
entries_read_count * input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*data_stream));
cuda_safe_call(hipMemcpyAsync(
*(output_buf[current_data_slot]),
output,
entries_read_count * output_neuron_count * sizeof(float),
hipMemcpyHostToDevice,
*data_stream));
}
cuda_safe_call(hipStreamSynchronize(*data_stream));
cuda_safe_call(hipStreamSynchronize(*command_stream));
entries_available_for_processing_count = entries_read_count;
entries_available_for_copy_in_count -= entries_read_count;
current_data_slot = 1 - current_data_slot;
current_command_slot = 1 - current_command_slot;
if (profile_mode)
entries_available_for_copy_in_count = 0;
}
read_data(net_data, data_list, *command_stream);
std::vector<float> mse_list(output_neuron_count * updater_entry_count);
cuda_safe_call(hipMemcpyAsync(&(*mse_list.begin()), *mse_buf, mse_list.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream));
cuda_safe_call(hipStreamSynchronize(*command_stream));
for(unsigned int i = 0; i < updater_entry_count; ++i)
std::copy(mse_list.begin() + output_neuron_count * i, mse_list.begin() + output_neuron_count * (i + 1), res[i]->cumulative_mse_list.begin());
return res;
}
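// Rebuilds the tester and updater objects from their schemas whenever the layer configuration list changes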
void network_updater_cuda::layer_config_list_modified()
{
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
tester_list.clear();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf)
{
tester_list.push_back(
(*it)->create_tester(
*it_conf,
*(it_conf + 1)));
}
updater_list.clear();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf)
{
updater_list.push_back(
(*it)->create_updater(
*it_conf,
*(it_conf + 1),
(it_conf > layer_config_list.begin() + testing_layer_count),
(it_conf > layer_config_list.begin() + testing_layer_count)));
}
}
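// Packs the training speed values of every sample for each updater layer into contiguous buffers and uploads them to the device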
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_training_speed(
const std::vector<network_data_smart_ptr>& training_speed_list,
hipStream_t stream_id) const
{
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res;
const network_data_smart_ptr& first_data = training_speed_list[0];
for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id)
{
std::vector<const_cuda_linear_buffer_device_smart_ptr> buffer_list;
unsigned int subindex = 0;
for(std::vector<std::vector<float> >::iterator it = (*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex)
{
size_t single_size = it->size();
std::vector<float> pack(single_size * training_speed_list.size());
std::vector<float>::iterator fill_it = pack.begin();
for(std::vector<network_data_smart_ptr>::const_iterator sample_it = training_speed_list.begin(); sample_it != training_speed_list.end(); sample_it++)
{
const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex);
fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it);
}
buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float),
stream_id)));
}
res.push_back(buffer_list);
}
return res;
}
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_data(
const std::vector<network_data_smart_ptr>& data_list,
hipStream_t stream_id) const
{
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res;
const network_data_smart_ptr& first_data = data_list[0];
for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id)
{
std::vector<cuda_linear_buffer_device_smart_ptr> buffer_list;
unsigned int subindex = 0;
for(std::vector<std::vector<float> >::iterator it = (*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex)
{
size_t single_size = it->size();
std::vector<float> pack(single_size * data_list.size());
std::vector<float>::iterator fill_it = pack.begin();
for(std::vector<network_data_smart_ptr>::const_iterator sample_it = data_list.begin(); sample_it != data_list.end(); sample_it++)
{
const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex);
fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it);
}
buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float),
stream_id)));
}
res.push_back(buffer_list);
}
return res;
}
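// Copies the updated layer data back from the device and scatters it into the per-sample network data structures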
void network_updater_cuda::read_data(
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list,
std::vector<network_data_smart_ptr>& res,
hipStream_t stream_id) const
{
const network_data_smart_ptr& first_data = res[0];
unsigned int layer_id = testing_layer_count;
for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id)
{
unsigned int subindex = 0;
for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator src_it2 = src_it->begin(); src_it2 != src_it->end(); ++src_it2, ++subindex)
{
cuda_linear_buffer_device_smart_ptr src = *src_it2;
std::vector<float> pack(src->get_size() / sizeof(float));
cuda_safe_call(hipMemcpyAsync(&(*pack.begin()), *src, pack.size() * sizeof(float), hipMemcpyDeviceToHost, stream_id));
cuda_safe_call(hipStreamSynchronize(stream_id));
std::vector<float>::const_iterator src_buf_it = pack.begin();
for(std::vector<network_data_smart_ptr>::const_iterator sample_it = res.begin(); sample_it != res.end(); sample_it++)
{
std::vector<float>& dst_buf = (*sample_it)->at(layer_id)->at(subindex);
std::copy(src_buf_it, src_buf_it + dst_buf.size(), dst_buf.begin());
src_buf_it += dst_buf.size();
}
}
}
}
void network_updater_cuda::update_buffers_configuration(
buffer_cuda_size_configuration& buffer_configuration,
unsigned int updater_entry_count) const
{
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration);
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration, updater_entry_count);
for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it)
it->second->update_buffer_configuration(buffer_configuration, updater_entry_count);
}
unsigned int network_updater_cuda::get_max_batch_size() const
{
buffer_cuda_size_configuration buffer_configuration;
for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration);
for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it)
it->second->update_buffer_configuration(buffer_configuration);
return cuda_config->get_max_entry_count(buffer_configuration, 0.5F);
}
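// Launches the dropout kernel over the target buffer, using an offset into the shared buffer of uniform random values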
void network_updater_cuda::enqueue_dropout(
hipStream_t stream_id,
const_cuda_linear_buffer_device_smart_ptr random_buffer,
cuda_linear_buffer_device_smart_ptr target_buffer,
float dropout_rate,
unsigned int mask,
unsigned int elem_count,
unsigned int offset_in_random_list)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( dropout_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*target_buffer,
*random_buffer,
dropout_rate,
offset_in_random_list,
mask,
elem_count);
}
}
}
| 78657798dd18fd07c108f7c1f3e0e67d5cbd9e90.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_updater_cuda.h"
#include "neural_network_cuda_exception.h"
#include "layer_testing_schema_factory.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "layer_updater_schema_factory.h"
#include "weight_vector_bound_cuda_factory.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include <stack>
__global__ void convert_compacted_to_raw_upd_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
__global__ void compute_error_upd_kernel(
float * __restrict errors,
float * __restrict mse,
const float * __restrict desired_output_neurons,
const float * __restrict actual_output_neurons,
int output_entry_id,
int output_elem_count,
int updater_entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int updater_entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (elem_id < output_elem_count) && (updater_entry_id < updater_entry_count);
if (in_bounds)
{
int offset = updater_entry_id * output_elem_count + elem_id;
float err = desired_output_neurons[output_entry_id * output_elem_count + elem_id] - actual_output_neurons[offset];
errors[offset] = err;
mse[offset] += err * err * 0.5F;
}
}
__global__ void dropout_kernel(
float * __restrict neurons,
const float * __restrict random_buf,
float dropout_rate,
int offset,
unsigned int mask,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
unsigned int random_elem_id = (elem_id + offset) & mask;
if (random_buf[random_elem_id] < dropout_rate)
neurons[elem_id] = 0.0F;
}
}
namespace nnforge
{
namespace cuda
{
unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024;
network_updater_cuda::network_updater_cuda(
network_schema_smart_ptr schema,
const std::map<unsigned int, float>& layer_to_dropout_rate_map,
const std::map<unsigned int, weight_vector_bound>& layer_to_weight_vector_bound_map,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_updater(schema, layer_to_dropout_rate_map, layer_to_weight_vector_bound_map)
, cuda_config(cuda_config)
{
const const_layer_list& layer_list = *schema;
testing_layer_count = 0;
start_layer_nonempty_weights_iterator = layer_list.begin();
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
{
start_layer_nonempty_weights_iterator = it;
if (!(*it)->is_empty_data())
break;
testing_layer_count++;
}
for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it)
testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config));
for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it)
updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config));
for(std::map<unsigned int, weight_vector_bound>::const_iterator it = this->layer_to_weight_vector_bound_map.begin(); it != this->layer_to_weight_vector_bound_map.end(); ++it)
{
unsigned int layer_id = it->first;
if (layer_id < testing_layer_count)
throw neural_network_exception((boost::format("Weight vector bound is specified for layer %1% while it is in testing part (consisting of %2% layers) of the updater") % layer_id % testing_layer_count).str());
weight_vector_bounds.insert(std::make_pair(layer_id, single_weight_vector_bound_factory::get_const_instance().create_weight_vector_bound(layer_list[layer_id], cuda_config)));
}
setup_network_cuda();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it)
testing_schema_data.push_back((*it)->get_schema_buffers());
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it)
updater_schema_data.push_back((*it)->get_schema_buffers());
}
network_updater_cuda::~network_updater_cuda()
{
}
void network_updater_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
data_stream = cuda_stream_smart_ptr(new cuda_stream());
}
std::vector<testing_result_smart_ptr> network_updater_cuda::actual_update(
supervised_data_reader& reader,
const std::vector<network_data_smart_ptr>& training_speed_vector_list,
std::vector<network_data_smart_ptr>& data_list)
{
std::vector<testing_result_smart_ptr> res;
entry_count_updated_in_profile_mode = 0;
reader.reset();
layer_configuration_specific input_configuration = reader.get_input_configuration();
layer_configuration_specific output_configuration = reader.get_output_configuration();
unsigned int input_neuron_count = input_configuration.get_neuron_count();
unsigned int output_neuron_count = output_configuration.get_neuron_count();
unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map();
neuron_data_type::input_type type_code = reader.get_input_type();
size_t input_neuron_elem_size = reader.get_input_neuron_elem_size();
unsigned int updater_entry_count = static_cast<unsigned int>(data_list.size());
if (updater_entry_count == 0)
return res;
for(unsigned int i = 0; i < training_speed_vector_list.size(); ++i)
res.push_back(testing_result_smart_ptr(new testing_result(output_neuron_count)));
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = enqueue_get_data(data_list, *command_stream);
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > training_speed_data = enqueue_get_training_speed(training_speed_vector_list, *command_stream);
buffer_cuda_size_configuration buffers_config;
update_buffers_configuration(buffers_config, updater_entry_count);
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input
buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output
buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error
buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // mse
if (!random_uniform_list.empty())
buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list
for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it)
for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffers_config.add_constant_buffer((*it2)->get_size());
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = training_speed_data.begin(); it != training_speed_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffers_config.add_constant_buffer((*it2)->get_size());
unsigned int max_entry_count = std::min<unsigned int>(std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch);
cuda_linear_buffer_device_smart_ptr input_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)),
};
cuda_linear_buffer_device_smart_ptr output_buf[2] =
{
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))),
};
cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr mse_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr random_uniform_buf;
if (!random_uniform_list.empty())
{
random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float)));
cuda_safe_call(cudaMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), cudaMemcpyHostToDevice, *command_stream));
}
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack;
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it)
{
std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count);
testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers));
output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers);
}
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it)
{
layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count);
updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers));
output_buffer = all_buffers.output_neurons_buffer;
}
std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers;
cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf;
for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it)
{
output_errors_buffers.push_back(output_errors);
layer_updater_cuda::buffer_set& all_buffers = it->second;
if (all_buffers.input_errors_buffer != 0)
output_errors = all_buffers.input_errors_buffer;
}
std::map<unsigned int, std::vector<cuda_linear_buffer_device_smart_ptr> > weight_vector_bound_buffers;
for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it)
weight_vector_bound_buffers.insert(std::make_pair(it->first, it->second->allocate_additional_buffers(max_entry_count)));
cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size));
unsigned char * input = *input_host_buf;
cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float)));
float * output = *output_host_buf;
// zero mse
cuda_util::set_with_value(
*cuda_config,
*mse_buf,
0.0F,
output_neuron_count * updater_entry_count,
*command_stream);
unsigned int current_data_slot = 0;
unsigned int current_command_slot = 1;
unsigned int entries_available_for_copy_in_count = reader.get_entry_count();
unsigned int entries_available_for_processing_count = 0;
cuda_event data_processed_event;
cuda_event input_copied_event;
if (cuda_config->is_flush_required())
{
cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream));
cuda_safe_call(cudaEventQuery(data_processed_event));
}
random_generator gen = rnd::get_random_generator();
std::tr1::uniform_int<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1));
unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1);
while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0))
{
if (entries_available_for_processing_count > 0)
{
// Convert input
if (type_code == neuron_data_type::type_byte)
{
int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
convert_compacted_to_raw_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*input_buf[current_command_slot],
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(cudaMemcpyAsync(
*input_converted_buf,
*input_buf[current_command_slot],
input_neuron_count * entries_available_for_processing_count * sizeof(float),
cudaMemcpyDeviceToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str());
// Run ann
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin();
unsigned int layer_id = 0;
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin();
for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it)
{
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = dist(gen);
enqueue_dropout(
*command_stream,
random_uniform_buf,
input_and_additional_buffers_pack_it->first,
dropout_it->second,
mask,
entries_available_for_processing_count * layer_config_it->get_neuron_count(),
offset);
}
(*it)->enqueue_test(
*command_stream,
*schema_data_it,
std::vector<const_cuda_linear_buffer_device_smart_ptr>(),
input_and_additional_buffers_pack_it->first,
input_and_additional_buffers_pack_it->second,
entries_available_for_processing_count);
}
}
// Apply dropout to the input of the first updater layer
{
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = dist(gen);
enqueue_dropout(
*command_stream,
random_uniform_buf,
updater_input_and_all_buffers_pack[0].first,
dropout_it->second,
mask,
entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(),
offset);
}
}
for(unsigned int input_entry_id = 0; input_entry_id < entries_available_for_processing_count; ++input_entry_id)
{
std::stack<unsigned int> offset_list;
// Forward updater
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin();
unsigned int layer_id = testing_layer_count;
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_id, ++layer_config_it)
{
if (it != updater_list.begin())
{
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = dist(gen);
offset_list.push(offset);
enqueue_dropout(
*command_stream,
random_uniform_buf,
input_and_all_buffers_pack_it->first,
dropout_it->second,
mask,
updater_entry_count * layer_config_it->get_neuron_count(),
offset);
}
}
(*it)->enqueue_test(
it == updater_list.begin() ? input_entry_id : 0,
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
updater_entry_count);
}
}
// Compute errors
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_neuron_count,
updater_entry_count,
1);
compute_error_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*initial_error_buf,
*mse_buf,
*output_buf[current_command_slot],
*output_buffer,
input_entry_id,
output_neuron_count,
updater_entry_count);
}
// Backward updater
{
std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin();
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator training_speed_data_it = training_speed_data.rbegin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin();
unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1;
layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1;
for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++training_speed_data_it, ++output_errors_it, ++net_data_it, --reverse_layer_id, ++layer_config_it)
{
if (it != (updater_list.rend() - 1))
{
(*it)->enqueue_backprop(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->first,
*output_errors_it,
input_and_all_buffers_pack_it->second.input_errors_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
updater_entry_count);
std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id);
if (dropout_it != layer_to_dropout_rate_map.end())
{
unsigned int offset = offset_list.top();
offset_list.pop();
enqueue_dropout(
*command_stream,
random_uniform_buf,
(input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer,
dropout_it->second,
mask,
updater_entry_count * layer_config_it->get_neuron_count(),
offset);
}
}
(*it)->enqueue_update_weights(
(it == (updater_list.rend() - 1)) ? input_entry_id : 0,
*command_stream,
*net_data_it,
*schema_data_it,
*training_speed_data_it,
*output_errors_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
updater_entry_count);
weight_vector_bound_map::iterator bound_it = weight_vector_bounds.find(reverse_layer_id);
if (bound_it != weight_vector_bounds.end())
{
const weight_vector_bound& bound = layer_to_weight_vector_bound_map.find(reverse_layer_id)->second;
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers = weight_vector_bound_buffers.find(reverse_layer_id)->second;
bound_it->second->enqueue_normalize_weights(
*command_stream,
bound,
*net_data_it,
additional_buffers,
updater_entry_count);
}
}
}
if (((input_entry_id % 16) == 1) && cuda_config->is_flush_required())
{
cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream));
cuda_safe_call(cudaEventQuery(data_processed_event));
}
} // for(unsigned int input_entry_id
if (profile_mode)
entry_count_updated_in_profile_mode += entries_available_for_processing_count;
for(std::vector<testing_result_smart_ptr>::iterator it = res.begin(); it != res.end(); ++it)
(*it)->entry_count += entries_available_for_processing_count;
if (cuda_config->is_flush_required())
{
cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream));
cuda_safe_call(cudaEventQuery(data_processed_event));
}
} // if (entries_available_for_processing_count > 0)
unsigned int entries_read_count = 0;
if (entries_available_for_copy_in_count > 0)
{
unsigned int entries_to_read_count = std::min<unsigned int>(max_entry_count, entries_available_for_copy_in_count);
while(entries_read_count < entries_to_read_count)
{
bool entry_read = reader.read(
input + (input_neuron_count * entries_read_count * input_neuron_elem_size),
output + (output_neuron_count * entries_read_count));
if (!entry_read)
break;
entries_read_count++;
}
cuda_safe_call(cudaMemcpyAsync(
*(input_buf[current_data_slot]),
input,
entries_read_count * input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*data_stream));
cuda_safe_call(cudaMemcpyAsync(
*(output_buf[current_data_slot]),
output,
entries_read_count * output_neuron_count * sizeof(float),
cudaMemcpyHostToDevice,
*data_stream));
}
cuda_safe_call(cudaStreamSynchronize(*data_stream));
cuda_safe_call(cudaStreamSynchronize(*command_stream));
entries_available_for_processing_count = entries_read_count;
entries_available_for_copy_in_count -= entries_read_count;
current_data_slot = 1 - current_data_slot;
current_command_slot = 1 - current_command_slot;
if (profile_mode)
entries_available_for_copy_in_count = 0;
}
read_data(net_data, data_list, *command_stream);
std::vector<float> mse_list(output_neuron_count * updater_entry_count);
cuda_safe_call(cudaMemcpyAsync(&(*mse_list.begin()), *mse_buf, mse_list.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream));
cuda_safe_call(cudaStreamSynchronize(*command_stream));
for(unsigned int i = 0; i < updater_entry_count; ++i)
std::copy(mse_list.begin() + output_neuron_count * i, mse_list.begin() + output_neuron_count * (i + 1), res[i]->cumulative_mse_list.begin());
return res;
}
void network_updater_cuda::layer_config_list_modified()
{
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
tester_list.clear();
for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf)
{
tester_list.push_back(
(*it)->create_tester(
*it_conf,
*(it_conf + 1)));
}
updater_list.clear();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf)
{
updater_list.push_back(
(*it)->create_updater(
*it_conf,
*(it_conf + 1),
(it_conf > layer_config_list.begin() + testing_layer_count),
(it_conf > layer_config_list.begin() + testing_layer_count)));
}
}
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_training_speed(
const std::vector<network_data_smart_ptr>& training_speed_list,
cudaStream_t stream_id) const
{
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res;
const network_data_smart_ptr& first_data = training_speed_list[0];
for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id)
{
std::vector<const_cuda_linear_buffer_device_smart_ptr> buffer_list;
unsigned int subindex = 0;
for(std::vector<std::vector<float> >::iterator it = (*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex)
{
size_t single_size = it->size();
std::vector<float> pack(single_size * training_speed_list.size());
std::vector<float>::iterator fill_it = pack.begin();
for(std::vector<network_data_smart_ptr>::const_iterator sample_it = training_speed_list.begin(); sample_it != training_speed_list.end(); sample_it++)
{
const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex);
fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it);
}
buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float),
stream_id)));
}
res.push_back(buffer_list);
}
return res;
}
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_data(
const std::vector<network_data_smart_ptr>& data_list,
cudaStream_t stream_id) const
{
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res;
const network_data_smart_ptr& first_data = data_list[0];
for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id)
{
std::vector<cuda_linear_buffer_device_smart_ptr> buffer_list;
unsigned int subindex = 0;
for(std::vector<std::vector<float> >::iterator it = (*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex)
{
size_t single_size = it->size();
std::vector<float> pack(single_size * data_list.size());
std::vector<float>::iterator fill_it = pack.begin();
for(std::vector<network_data_smart_ptr>::const_iterator sample_it = data_list.begin(); sample_it != data_list.end(); sample_it++)
{
const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex);
fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it);
}
buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float),
stream_id)));
}
res.push_back(buffer_list);
}
return res;
}
void network_updater_cuda::read_data(
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list,
std::vector<network_data_smart_ptr>& res,
cudaStream_t stream_id) const
{
const network_data_smart_ptr& first_data = res[0];
unsigned int layer_id = testing_layer_count;
for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id)
{
unsigned int subindex = 0;
for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator src_it2 = src_it->begin(); src_it2 != src_it->end(); ++src_it2, ++subindex)
{
cuda_linear_buffer_device_smart_ptr src = *src_it2;
std::vector<float> pack(src->get_size() / sizeof(float));
cuda_safe_call(cudaMemcpyAsync(&(*pack.begin()), *src, pack.size() * sizeof(float), cudaMemcpyDeviceToHost, stream_id));
cuda_safe_call(cudaStreamSynchronize(stream_id));
std::vector<float>::const_iterator src_buf_it = pack.begin();
for(std::vector<network_data_smart_ptr>::const_iterator sample_it = res.begin(); sample_it != res.end(); sample_it++)
{
std::vector<float>& dst_buf = (*sample_it)->at(layer_id)->at(subindex);
std::copy(src_buf_it, src_buf_it + dst_buf.size(), dst_buf.begin());
src_buf_it += dst_buf.size();
}
}
}
}
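// Registers the constant schema buffers and lets every tester, updater and weight
// vector bound report its per-entry buffer needs to the shared size configuration.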
void network_updater_cuda::update_buffers_configuration(
buffer_cuda_size_configuration& buffer_configuration,
unsigned int updater_entry_count) const
{
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration);
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it)
for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
buffer_configuration.add_constant_buffer((*it2)->get_size());
for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration, updater_entry_count);
for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it)
it->second->update_buffer_configuration(buffer_configuration, updater_entry_count);
}
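// Estimates how many entries can be updated at once: per-entry buffer requirements are
// collected and cuda_config converts them into an entry count; 0.5F is presumably the
// fraction of device memory the updater is allowed to occupy.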
unsigned int network_updater_cuda::get_max_batch_size() const
{
buffer_cuda_size_configuration buffer_configuration;
for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it)
(*it)->update_buffer_configuration(buffer_configuration);
for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it)
it->second->update_buffer_configuration(buffer_configuration);
return cuda_config->get_max_entry_count(buffer_configuration, 0.5F);
}
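// Enqueues dropout on target_buffer using a pre-generated random buffer. The
// dropout_kernel itself is defined earlier in this file (not shown in this excerpt);
// it presumably zeroes elements whose random value, read starting at
// offset_in_random_list and combined with mask, falls below dropout_rate.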
void network_updater_cuda::enqueue_dropout(
cudaStream_t stream_id,
const_cuda_linear_buffer_device_smart_ptr random_buffer,
cuda_linear_buffer_device_smart_ptr target_buffer,
float dropout_rate,
unsigned int mask,
unsigned int elem_count,
unsigned int offset_in_random_list)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
dropout_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*target_buffer,
*random_buffer,
dropout_rate,
offset_in_random_list,
mask,
elem_count);
}
}
}
|
05eea2d305f16f527eec6b07f6e2afa7cafc6e8d.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
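// Configuration summary: SIMT (non-tensor-core) fp32 GEMM with parallel split-K
// reduction for SM50 and newer; A is row-major, B is column-major, C row-major.
// Threadblock and warp tiles are both 16x64x8, i.e. effectively one warp per block.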
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| 05eea2d305f16f527eec6b07f6e2afa7cafc6e8d.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
9e6a8dfa20784143c216b8891d7fb020be1df223.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file sobel.cu
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief Sobel Filtering
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef NSTREAMS
#define NSTREAMS 12
#endif
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
int FILTER_HOST[3][3] = {{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
void sobel_host(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
#pragma omp parallel for collapse(2)
for (int y = 1; y < height - 1; y++)
{
for (int x = 1; x < width - 1; x++)
{
int dx = 0, dy = 0;
for (int k = -1; k <= 1; k++)
{
for (int z = -1; z <= 1; z++)
{
dx += FILTER_HOST[k + 1][z + 1] * orig[(y + k) * width + x + z];
dy += FILTER_HOST[z + 1][k + 1] * orig[(y + k) * width + x + z];
}
}
out[y * width + x] = sqrt((float)((dx * dx) + (dy * dy)));
}
}
}
__constant__ int FILTER_GPU[3][3] = {{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
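// One thread per output pixel: each interior thread applies the 3x3 Sobel operator
// (FILTER_GPU for dx, its transpose for dy) read from constant memory and writes the
// gradient magnitude sqrt(dx^2 + dy^2).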
__global__ void sobel_v1(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (j > 0 && i > 0 && j < width - 1 && i < height - 1)
{
int dx = 0, dy = 0;
for (int k = -1; k <= 1; k++)
{
for (int z = -1; z <= 1; z++)
{
dx += FILTER_GPU[k + 1][z + 1] * orig[(i + k) * width + j + z];
dy += FILTER_GPU[z + 1][k + 1] * orig[(i + k) * width + j + z];
}
}
out[i * width + j] = sqrt((float)((dx * dx) + (dy * dy)));
}
}
int main(int argc, char *argv[])
{
int iret = 0;
struct timespec rt[2];
string filename("data/sample.avi");
if (argc > 1)
filename = argv[1];
//Open Video Example
VideoCapture cap(filename);
// Check if camera opened successfully
if (!cap.isOpened())
{
cout << "Error opening video stream or file" << endl;
return -1;
}
int width = cap.get(CAP_PROP_FRAME_WIDTH);
int height = cap.get(CAP_PROP_FRAME_HEIGHT);
int nCh = 3;
// Frame Buffers
Mat frameRGB = Mat::zeros(height, width, CV_8UC3);
Mat frameIn = Mat::zeros(height, width, CV_8UC1);
Mat frameOut = Mat::zeros(height, width, CV_8UC1);
int nFrames = 0;
double time_cnt = 0.0;
while (1)
{
bool lastFrame = cap.read(frameRGB); // read a new frame from video
if (!lastFrame)
break;
cvtColor(frameRGB, frameIn, COLOR_BGR2GRAY);
// Compute CPU Version - Golden Model
clock_gettime(CLOCK_REALTIME, rt + 0);
sobel_host(frameIn.ptr(), frameOut.ptr(), width, height);
clock_gettime(CLOCK_REALTIME, rt + 1);
time_cnt+= (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
nFrames++;
#ifdef DISPLAY
// Show frames
imshow("frameIn", frameIn);
imshow("frameOut", frameOut);
waitKey(1);
#endif
}
printf("Sobel (Host) : %d frames, %9.6f s per-frame (%9.6f fps)\n", nFrames, time_cnt/nFrames, 1/(time_cnt/nFrames));
// CUDA VERSION --------------------------------------------------
//Open Video Example
cap = VideoCapture(filename);
// Check if camera opened successfully
if (!cap.isOpened())
{
cout << "Error opening video stream or file" << endl;
return -1;
}
unsigned char *d_image_in;
unsigned char *d_image_out;
gpuErrchk(hipMalloc((void **)&d_image_in, sizeof(unsigned char) * width * height));
gpuErrchk(hipMalloc((void **)&d_image_out, sizeof(unsigned char) * width * height));
gpuErrchk(hipMemset(d_image_out, 0, sizeof(unsigned char) * width * height));
hipStream_t stream[NSTREAMS];
for(int i = 0; i < NSTREAMS; i++)
hipStreamCreate(&stream[i]);
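// Frames are assigned to streams round-robin so copies and kernels of successive
// frames may overlap; note that all streams share the same pair of device buffers.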
nFrames = 0;
time_cnt = 0.0;
while (1)
{
bool lastFrame = cap.read(frameRGB); // read a new frame from video
if (!lastFrame)
break;
cvtColor(frameRGB, frameIn, COLOR_BGR2GRAY);
// Compute GPU Version
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(hipMemcpyAsync(d_image_in, frameIn.ptr(), sizeof(unsigned char) * width * height, hipMemcpyHostToDevice,stream[nFrames%NSTREAMS]));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((width + BLOCK_SIZE - 1) / BLOCK_SIZE, (height + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL(( sobel_v1), dim3(dimGrid), dim3(dimBlock),0,stream[nFrames%NSTREAMS], d_image_in, d_image_out, width, height);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpyAsync(frameOut.ptr(), d_image_out, sizeof(unsigned char) * width * height, hipMemcpyDeviceToHost,stream[nFrames%NSTREAMS]));
clock_gettime(CLOCK_REALTIME, rt + 1);
time_cnt+= (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
nFrames++;
#ifdef DISPLAY
// Show frames
imshow("frameIn", frameIn);
imshow("frameOut", frameOut);
waitKey(1);
#endif
}
hipDeviceSynchronize();
printf("Sobel (GPU) : %d frames, %9.6f s per-frame (%9.6f fps)\n", nFrames, time_cnt/nFrames, 1/(time_cnt/nFrames));
gpuErrchk(hipFree(d_image_out));
gpuErrchk(hipFree(d_image_in));
for (int i=0; i<NSTREAMS; ++i)
gpuErrchk(hipStreamDestroy(stream[i]));
frameOut.release();
frameIn.release();
frameRGB.release();
cap.release();
return iret;
}
| 9e6a8dfa20784143c216b8891d7fb020be1df223.cu | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file sobel.cu
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief Sobel Filtering
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef NSTREAMS
#define NSTREAMS 12
#endif
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
int FILTER_HOST[3][3] = {{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
void sobel_host(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
#pragma omp parallel for collapse(2)
for (int y = 1; y < height - 1; y++)
{
for (int x = 1; x < width - 1; x++)
{
int dx = 0, dy = 0;
for (int k = -1; k <= 1; k++)
{
for (int z = -1; z <= 1; z++)
{
dx += FILTER_HOST[k + 1][z + 1] * orig[(y + k) * width + x + z];
dy += FILTER_HOST[z + 1][k + 1] * orig[(y + k) * width + x + z];
}
}
out[y * width + x] = sqrt((float)((dx * dx) + (dy * dy)));
}
}
}
__constant__ int FILTER_GPU[3][3] = {{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
__global__ void sobel_v1(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (j > 0 && i > 0 && j < width - 1 && i < height - 1)
{
int dx = 0, dy = 0;
for (int k = -1; k <= 1; k++)
{
for (int z = -1; z <= 1; z++)
{
dx += FILTER_GPU[k + 1][z + 1] * orig[(i + k) * width + j + z];
dy += FILTER_GPU[z + 1][k + 1] * orig[(i + k) * width + j + z];
}
}
out[i * width + j] = sqrt((float)((dx * dx) + (dy * dy)));
}
}
int main(int argc, char *argv[])
{
int iret = 0;
struct timespec rt[2];
string filename("data/sample.avi");
if (argc > 1)
filename = argv[1];
//Open Video Example
VideoCapture cap(filename);
// Check if camera opened successfully
if (!cap.isOpened())
{
cout << "Error opening video stream or file" << endl;
return -1;
}
int width = cap.get(CAP_PROP_FRAME_WIDTH);
int height = cap.get(CAP_PROP_FRAME_HEIGHT);
int nCh = 3;
// Frame Buffers
Mat frameRGB = Mat::zeros(height, width, CV_8UC3);
Mat frameIn = Mat::zeros(height, width, CV_8UC1);
Mat frameOut = Mat::zeros(height, width, CV_8UC1);
int nFrames = 0;
double time_cnt = 0.0;
while (1)
{
bool lastFrame = cap.read(frameRGB); // read a new frame from video
if (!lastFrame)
break;
cvtColor(frameRGB, frameIn, COLOR_BGR2GRAY);
// Compute CPU Version - Golden Model
clock_gettime(CLOCK_REALTIME, rt + 0);
sobel_host(frameIn.ptr(), frameOut.ptr(), width, height);
clock_gettime(CLOCK_REALTIME, rt + 1);
time_cnt+= (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
nFrames++;
#ifdef DISPLAY
// Show frames
imshow("frameIn", frameIn);
imshow("frameOut", frameOut);
waitKey(1);
#endif
}
printf("Sobel (Host) : %d frames, %9.6f s per-frame (%9.6f fps)\n", nFrames, time_cnt/nFrames, 1/(time_cnt/nFrames));
// CUDA VERSION --------------------------------------------------
//Open Video Example
cap = VideoCapture(filename);
// Check if camera opened successfully
if (!cap.isOpened())
{
cout << "Error opening video stream or file" << endl;
return -1;
}
unsigned char *d_image_in;
unsigned char *d_image_out;
gpuErrchk(cudaMalloc((void **)&d_image_in, sizeof(unsigned char) * width * height));
gpuErrchk(cudaMalloc((void **)&d_image_out, sizeof(unsigned char) * width * height));
gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));
cudaStream_t stream[NSTREAMS];
for(int i = 0; i < NSTREAMS; i++)
cudaStreamCreate(&stream[i]);
nFrames = 0;
time_cnt = 0.0;
while (1)
{
bool lastFrame = cap.read(frameRGB); // read a new frame from video
if (!lastFrame)
break;
cvtColor(frameRGB, frameIn, COLOR_BGR2GRAY);
// Compute GPU Version
clock_gettime(CLOCK_REALTIME, rt + 0);
gpuErrchk(cudaMemcpyAsync(d_image_in, frameIn.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice,stream[nFrames%NSTREAMS]));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((width + BLOCK_SIZE - 1) / BLOCK_SIZE, (height + BLOCK_SIZE - 1) / BLOCK_SIZE);
sobel_v1<<<dimGrid, dimBlock,0,stream[nFrames%NSTREAMS]>>>(d_image_in, d_image_out, width, height);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpyAsync(frameOut.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost,stream[nFrames%NSTREAMS]));
clock_gettime(CLOCK_REALTIME, rt + 1);
time_cnt+= (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
nFrames++;
#ifdef DISPLAY
// Show frames
imshow("frameIn", frameIn);
imshow("frameOut", frameOut);
waitKey(1);
#endif
}
cudaDeviceSynchronize();
printf("Sobel (GPU) : %d frames, %9.6f s per-frame (%9.6f fps)\n", nFrames, time_cnt/nFrames, 1/(time_cnt/nFrames));
gpuErrchk(cudaFree(d_image_out));
gpuErrchk(cudaFree(d_image_in));
for (int i=0; i<NSTREAMS; ++i)
gpuErrchk(cudaStreamDestroy(stream[i]));
frameOut.release();
frameIn.release();
frameRGB.release();
cap.release();
return iret;
}
|
60beeb8566f288110cb9402faf4e4f1cbba64915.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct SelectedRowsAdd<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::SelectedRows& input2,
phi::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2.height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2.height()));
output->set_height(in1_height);
phi::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
in2_value.numel() / in2_rows.size(),
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel,
out_value->numel() / out_rows.size(),
phi::errors::InvalidArgument(
"The input and oupput width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in1_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in2_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(out_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
paddle::memory::Copy(out_place,
out_data,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
auto* in2_data = in2_value.data<T>();
paddle::memory::Copy(out_place,
out_data + in1_value.numel(),
in2_place,
in2_data,
in2_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAdd<phi::GPUContext, float>;
template struct SelectedRowsAdd<phi::GPUContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::DenseTensor& input2,
phi::DenseTensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument(
"The two inputs height must be equal."
"But received first input height = [%d], first input height = [%d]",
in1_height,
in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input and output height must be equal."
"But received input height = [%d], output height = [%d]",
in1_height,
out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2.numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel,
output->numel() / in1_height,
phi::errors::InvalidArgument(
"The input and output width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
phi::funcs::SetConstant<phi::GPUContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<T, block_size>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
out_data,
in1_row_numel);
auto out_eigen = EigenVector<T>::Flatten(*output);
auto in2_eigen = EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<phi::GPUContext, float>;
template struct SelectedRowsAddTensor<phi::GPUContext, double>;
template struct SelectedRowsAdd<phi::GPUContext, phi::dtype::float16>;
template struct SelectedRowsAddTensor<phi::GPUContext, phi::dtype::float16>;
template <typename T>
struct SelectedRowsAddTo<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const int64_t input2_offset,
phi::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2->height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
if (in1_rows.size()) {
mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in1_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in1_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
paddle::memory::Copy(in2_place,
in2_data + input2_offset,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAddTo<phi::GPUContext, float>;
template struct SelectedRowsAddTo<phi::GPUContext, double>;
template struct SelectedRowsAddTo<phi::GPUContext, int>;
template struct SelectedRowsAddTo<phi::GPUContext, int64_t>;
template struct SelectedRowsAddTo<phi::GPUContext, phi::dtype::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
phi::DenseTensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<T, block_size>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<phi::GPUContext, float>;
template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
namespace scatter {
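// MergeAddKernel: one block per input row. Thread 0 linearly searches out_rows for the
// merged position of the block's row, then all threads accumulate that row's values
// into the output with phi::CudaAtomicAdd, since the same row id may occur repeatedly.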
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input,
const int64_t* input_rows,
T* out,
const int64_t* out_rows,
size_t out_rows_size,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
phi::CudaAtomicAdd(out + index, input[index]);
}
}
template <typename DeviceContext, typename T>
struct MergeAddImpl {
phi::SelectedRows operator()(const DeviceContext& context,
const phi::SelectedRows& input,
const bool sorted_result = false) {
phi::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const DeviceContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result = false) {
phi::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
phi::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
void operator()(const DeviceContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const phi::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
phi::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(
input_width,
input->value().dims()[1],
phi::errors::InvalidArgument("All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(
input_height,
input->height(),
phi::errors::InvalidArgument("All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
}
};
template <typename T>
struct MergeAdd<phi::GPUContext, T> {
// unary functor, merge by adding duplicated rows in
// the input SelectedRows object.
phi::SelectedRows operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
const bool sorted_result) {
return MergeAddImpl<phi::GPUContext, T>()(context, input, sorted_result);
}
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, input, output, sorted_result);
}
void operator()(const phi::GPUContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, inputs, output, sorted_result);
}
};
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD(dtype) \
template struct MergeAddImpl<phi::GPUContext, dtype>; \
template struct MergeAdd<phi::GPUContext, dtype>;
TEMPLATE_SPECIALIZED_FOR_MERGEADD(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::float16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<double>)
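// UpdateToTensorKernel: one block per selected row; applies the requested ScatterOps
// operation element-wise between the selected-rows values and the dense tensor rows
// they map to.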
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows,
const ScatterOps& op,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
// FIXME(typhoonzero): use a macro to fix the messy code below.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
template <typename T>
struct UpdateToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const ScatterOps& op,
const phi::SelectedRows& input1,
DenseTensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<phi::GPUContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
dim3 threads(phi::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
hipLaunchKernelGGL(( UpdateToTensorKernel<T, phi::PADDLE_CUDA_NUM_THREADS>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace funcs
} // namespace phi
| 60beeb8566f288110cb9402faf4e4f1cbba64915.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct SelectedRowsAdd<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::SelectedRows& input2,
phi::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2.height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2.height()));
output->set_height(in1_height);
phi::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
in2_value.numel() / in2_rows.size(),
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel,
out_value->numel() / out_rows.size(),
phi::errors::InvalidArgument(
"The input and oupput width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in1_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in2_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(out_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
paddle::memory::Copy(out_place,
out_data,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
auto* in2_data = in2_value.data<T>();
paddle::memory::Copy(out_place,
out_data + in1_value.numel(),
in2_place,
in2_data,
in2_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAdd<phi::GPUContext, float>;
template struct SelectedRowsAdd<phi::GPUContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::DenseTensor& input2,
phi::DenseTensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument(
"The two inputs height must be equal."
"But received first input height = [%d], first input height = [%d]",
in1_height,
in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input and output height must be equal."
"But received input height = [%d], output height = [%d]",
in1_height,
out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2.numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel,
output->numel() / in1_height,
phi::errors::InvalidArgument(
"The input and output width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
phi::funcs::SetConstant<phi::GPUContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
SelectedRowsAddTensorKernel<T, block_size>
<<<grid, threads, 0, context.stream()>>>(
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
out_data,
in1_row_numel);
auto out_eigen = EigenVector<T>::Flatten(*output);
auto in2_eigen = EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<phi::GPUContext, float>;
template struct SelectedRowsAddTensor<phi::GPUContext, double>;
template struct SelectedRowsAdd<phi::GPUContext, phi::dtype::float16>;
template struct SelectedRowsAddTensor<phi::GPUContext, phi::dtype::float16>;
template <typename T>
struct SelectedRowsAddTo<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const int64_t input2_offset,
phi::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2->height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
if (in1_rows.size()) {
mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in1_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(in1_place),
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
paddle::memory::Copy(in2_place,
in2_data + input2_offset,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAddTo<phi::GPUContext, float>;
template struct SelectedRowsAddTo<phi::GPUContext, double>;
template struct SelectedRowsAddTo<phi::GPUContext, int>;
template struct SelectedRowsAddTo<phi::GPUContext, int64_t>;
template struct SelectedRowsAddTo<phi::GPUContext, phi::dtype::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
phi::DenseTensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
SelectedRowsAddToTensorKernel<T, block_size>
<<<grid, threads, 0, context.stream()>>>(
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<phi::GPUContext, float>;
template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
namespace scatter {
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input,
const int64_t* input_rows,
T* out,
const int64_t* out_rows,
size_t out_rows_size,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
phi::CudaAtomicAdd(out + index, input[index]);
}
}
template <typename DeviceContext, typename T>
struct MergeAddImpl {
phi::SelectedRows operator()(const DeviceContext& context,
const phi::SelectedRows& input,
const bool sorted_result = false) {
phi::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const DeviceContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result = false) {
phi::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
phi::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
void operator()(const DeviceContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const phi::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
phi::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(
input_width,
input->value().dims()[1],
phi::errors::InvalidArgument("All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(
input_height,
input->height(),
phi::errors::InvalidArgument("All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
}
};
template <typename T>
struct MergeAdd<phi::GPUContext, T> {
// unary functor, merge by adding duplicated rows in
// the input SelectedRows object.
phi::SelectedRows operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
const bool sorted_result) {
return MergeAddImpl<phi::GPUContext, T>()(context, input, sorted_result);
}
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, input, output, sorted_result);
}
void operator()(const phi::GPUContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, inputs, output, sorted_result);
}
};
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD(dtype) \
template struct MergeAddImpl<phi::GPUContext, dtype>; \
template struct MergeAdd<phi::GPUContext, dtype>;
TEMPLATE_SPECIALIZED_FOR_MERGEADD(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::float16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<double>)
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows,
const ScatterOps& op,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
// FIXME(typhoonzero): use a macro to fix the messy code below.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
template <typename T>
struct UpdateToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const ScatterOps& op,
const phi::SelectedRows& input1,
DenseTensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<phi::GPUContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
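    // One block per merged sparse row; the block_size template argument matches the
    // thread count so the strided loops in UpdateToTensorKernel cover each row.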
dim3 threads(phi::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
UpdateToTensorKernel<T, phi::PADDLE_CUDA_NUM_THREADS>
<<<grid, threads, 0, context.stream()>>>(
in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace funcs
} // namespace phi
|
0b469134d3d26747e6e0a0ad2518ac2d772f8efe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cukd/blob/master/LICENSE
#include <thrust/sequence.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
#include "node_chunk_array.h"
#include "../primitives.h"
#include "../utils.h"
#include "../algorithms/reduction.h"
namespace cukd {
namespace device {
__global__
void
create_chunks_kernel(device::NodeChunkArray arr, int* offsets, int n_nodes,
int max_chunk_size);
__global__
void
tag_triangles_left_right_kernel(device::NodeChunkArray active, int n_elements,
int * tag);
__global__
void
element_clipping_kernel(int n_parent_nodes, device::NodeChunkArray active,
int* p_split_axis, float* p_split_pos,
DevTriangleArray tris, int n_left);
__global__
void
update_parent_aabbs_kernel(int n_nodes, device::NodeChunkArray active,
device::NodeChunkArray next);
__global__
void
tag_triangles_by_node_tag_kernel(device::NodeChunkArray nca, int* node_tags,
int* tags);
__global__
void
element_aabb_boundary_planes_kernel(device::NodeChunkArray nca, int nca_n_nodes,
float* boundaries, int* dirs);
__global__
void
determine_empty_space_cut_kernel(int dir, int n_nodes,
DevAABBArray parent_aabb,
DevAABBArray node_aabb,
int* cut_dir);
} // namespace device
/**********************************************************************************
*
* NodeChunkArray implementation
*
**********************************************************************************/
typedef thrust::tuple<int, int, float, AABBArray::AABBTuple> NodeTuple;
typedef thrust::tuple<thrust::device_vector<int>::iterator,
thrust::device_vector<int>::iterator,
thrust::device_vector<float>::iterator,
thrust::device_vector<int>::iterator,
AABBArray::AABBIterator, AABBArray::AABBIterator> NodeIteratorTuple;
NodeChunkArray::NodeChunkArray() : n_chunks_(0) {}
void
NodeChunkArray::init_root_node(int n_elements, AABBArray & tri_aabbs,
const UAABB & root_aabb) {
resize_nodes(1);
resize_elements(n_elements);
tri_aabbs.copy(triangle_aabbs);
thrust::sequence(element_idx.begin(), element_idx.end());
node_size.set(0, n_elements_);
node_element_first_idx.set(0,0);
parent_aabb.minima.set(0, root_aabb.minimum);
parent_aabb.maxima.set(0, root_aabb.maximum);
depth.set(0,0);
}
void
NodeChunkArray::empty_space_tags(DevVector<int> & cut_dirs,
DevVector<int> & n_empty_nodes) {
// Determine if empty space cuts for the current nodes in
    // the active list are possible.
cut_dirs.resize(n_nodes_ + 1, 0);
n_empty_nodes.resize(n_nodes_ + 1);
DevAABBArray dev_parent_aabb = parent_aabb.dev_array();
DevAABBArray dev_node_aabb = node_aabb.dev_array();
for(int axis = 0; axis < 3; ++axis)
determine_empty_space(n_nodes_, axis,
dev_parent_aabb, dev_node_aabb,
cut_dirs.pointer());
// Compute the number of empty split nodes for each node.
// n_empty_nodes[n_nodes + 1] is the total number of empty split
// nodes at this pass.
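    // Assumption from usage: each set bit in cut_dirs[i] marks one possible empty-space
    // cut for node i, so CountBitsFunctor yields the number of empty nodes that node
    // will split off; the exclusive scan below turns those counts into output offsets.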
CountBitsFunctor countbits;
thrust::exclusive_scan(
thrust::make_transform_iterator(cut_dirs.begin(), countbits),
thrust::make_transform_iterator(cut_dirs.end(), countbits),
n_empty_nodes.begin());
};
void
NodeChunkArray::divide_in_chunks() {
// compute the number of chunks for each node, such that each
// chunk holds at most MAX_ELEMENTS_PER_CHUNK elements
n_chunks_per_node.clear();
n_chunks_per_node.resize(n_nodes_);
thrust::transform(node_size.begin(),
node_size.begin() + n_nodes_,
n_chunks_per_node.begin(),
IntegerDivide(MAX_ELEMENTS_PER_CHUNK));
int n_new_chunks = thrust::reduce(n_chunks_per_node.begin(),
n_chunks_per_node.end());
resize_chunks(n_new_chunks);
// compute the indices to the element list per chunk
first_chunk_idx.clear();
first_chunk_idx.resize(n_nodes_);
thrust::exclusive_scan(n_chunks_per_node.begin(),
n_chunks_per_node.end(),
first_chunk_idx.begin());
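    // Illustrative example, assuming IntegerDivide performs ceiling division and
    // MAX_ELEMENTS_PER_CHUNK is 256: node_size = {500, 100, 300} gives
    // n_chunks_per_node = {2, 1, 2} and first_chunk_idx = {0, 2, 3}, i.e. each node
    // knows the index of its first chunk.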
dim3 blocks = dim3(256, 1, 1);
dim3 grid = dim3(IntegerDivide(blocks.x)(n_nodes_),1,1);
hipLaunchKernelGGL(( device::create_chunks_kernel), dim3(grid), dim3(blocks), 0, 0, dev_array(),
first_chunk_idx.pointer(),
n_nodes_, MAX_ELEMENTS_PER_CHUNK);
CUT_CHECK_ERROR("create_chunks_kernel failed");
}
void
NodeChunkArray::chunk_node_reduce_aabbs() {
int* first_idx_ptr = chunk_element_first_idx.pointer();
int* size_ptr = chunk_size.pointer();
DevAABBArray tri_aabb = triangle_aabbs.dev_array();
DevAABBArray c_aabb = chunk_aabb.dev_array();
chunk_reduce<256, UFloat4, MinReductionMethod<UFloat4> >(
tri_aabb.minima, c_aabb.minima,
n_chunks_, first_idx_ptr, size_ptr);
chunk_reduce<256, UFloat4, MaxReductionMethod<UFloat4> >(
tri_aabb.maxima, c_aabb.maxima,
n_chunks_, first_idx_ptr, size_ptr);
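    // The per-chunk AABBs are complete at this point. Chunks belonging to the same
    // node carry the same node_idx key and are stored contiguously, so the keyed
    // reduction below collapses them into one AABB per node via AABBArray::MinMax().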
out_keys.clear();
out_keys.resize(n_nodes());
thrust::equal_to<int> equal_keys;
thrust::reduce_by_key(node_idx.begin(), node_idx.begin() + n_chunks_,
chunk_aabb.begin(), out_keys.begin(),
node_aabb.begin(),
equal_keys, AABBArray::MinMax());
}
int
NodeChunkArray::append_by_tag(NodeChunkArray & nca, int new_nodes,
DevVector<int> & node_tags,
DevVector<int> & element_tags) {
element_tags.resize(nca.n_elements());
nca.divide_in_chunks();
nca.chunk_node_reduce_aabbs();
dim3 grid(nca.n_chunks(), 1, 1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::tag_triangles_by_node_tag_kernel), dim3(grid), dim3(blocks), 0, 0, nca.dev_array(),
node_tags.pointer(),
element_tags.pointer());
CUT_CHECK_ERROR("tag_triangles_by_node_tag_kernel failed");
int n_old_nodes = n_nodes();
int n_old_elements = n_elements();
resize_nodes(n_old_nodes + new_nodes);
// copy nodes
NodeIteratorTuple begin =
thrust::make_tuple(nca.node_size.begin(),
nca.split_axis.begin(),
nca.split_position.begin(),
nca.depth.begin(),
nca.parent_aabb.begin(),
nca.node_aabb.begin());
NodeIteratorTuple end =
thrust::make_tuple(nca.node_size.end(),
nca.split_axis.end(),
nca.split_position.end(),
nca.depth.end(),
nca.parent_aabb.end(),
nca.node_aabb.end());
NodeIteratorTuple result =
thrust::make_tuple(node_size.begin() + n_old_nodes,
split_axis.begin() + n_old_nodes,
split_position.begin() + n_old_nodes,
depth.begin() + n_old_nodes,
parent_aabb.begin() + n_old_nodes,
node_aabb.begin() + n_old_nodes);
IsNonZero<int> is_non_zero;
thrust::copy_if(thrust::make_zip_iterator(begin),
thrust::make_zip_iterator(end),
node_tags.begin(),
thrust::make_zip_iterator(result),
is_non_zero);
int new_elems = thrust::reduce(node_size.begin() + n_old_nodes, node_size.end());
resize_elements(n_old_elements + new_elems);
// now copy the elements
thrust::copy_if(
thrust::make_zip_iterator(thrust::make_tuple(nca.element_idx.begin(), nca.triangle_aabbs.begin())),
thrust::make_zip_iterator(thrust::make_tuple(nca.element_idx.end(), nca.triangle_aabbs.end())),
element_tags.begin(),
thrust::make_zip_iterator(thrust::make_tuple(element_idx.begin() + n_old_elements,
triangle_aabbs.begin() + n_old_elements)),
is_non_zero);
// nodes complete, compute first element indices
update_node_element_first_idx();
return new_elems;
}
void NodeChunkArray::remove_by_tag(DevVector<int> & node_tags,
DevVector<int> & element_tags,
int n_removed_nodes, int n_removed_elements) {
// remove nodes
NodeIteratorTuple begin =
thrust::make_tuple(node_size.begin(),
split_axis.begin(),
split_position.begin(),
depth.begin(),
parent_aabb.begin(),
node_aabb.begin());
NodeIteratorTuple end =
thrust::make_tuple(node_size.end(),
split_axis.end(),
split_position.end(),
depth.end(),
parent_aabb.end(),
node_aabb.end());
thrust::remove_if(thrust::make_zip_iterator(begin),
thrust::make_zip_iterator(end),
node_tags.begin(), IsNonZero<int>());
resize_nodes(node_size.size() - n_removed_nodes);
// remove elements
thrust::remove_if(thrust::make_zip_iterator(
thrust::make_tuple(element_idx.begin(), triangle_aabbs.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(element_idx.end(), triangle_aabbs.end())),
element_tags.begin(), IsNonZero<int>());
resize_elements(element_idx.size() - n_removed_elements);
update_node_element_first_idx();
}
void
NodeChunkArray::compact_elements_by_tag(DevVector<int> & tags, int start_tag_index,
int end_tag_index, int start_element_index,
NodeChunkArray & nca) {
typedef thrust::tuple<int, int, AABBArray::AABBTuple> TagElementAABBTuple;
typedef thrust::tuple<thrust::device_vector<int>::iterator,
thrust::device_vector<int>::iterator,
AABBArray::AABBIterator> IteratorTuple;
DevVector<int> temp;
temp.resize(tags.size());
FirstIsValue<TagElementAABBTuple, int> first_is_value(1);
IteratorTuple begin =
thrust::make_tuple(tags.begin() + start_tag_index,
element_idx.begin(),
triangle_aabbs.begin());
IteratorTuple end =
thrust::make_tuple(tags.begin() + end_tag_index,
element_idx.end(),
triangle_aabbs.end());
IteratorTuple result =
thrust::make_tuple(temp.begin(),
nca.element_idx.begin() + start_element_index,
nca.triangle_aabbs.begin() + start_element_index);
thrust::copy_if(
thrust::make_zip_iterator(begin), thrust::make_zip_iterator(end),
thrust::make_zip_iterator(result), first_is_value);
}
void NodeChunkArray::update_node_element_first_idx() {
thrust::exclusive_scan(node_size.begin(), node_size.end(),
node_element_first_idx.begin());
}
device::NodeChunkArray
NodeChunkArray::dev_array() {
device::NodeChunkArray dev_node_chunk_array;
dev_node_chunk_array.na = NodeArray::dev_array();
dev_node_chunk_array.node_idx = node_idx.pointer();
dev_node_chunk_array.chunk_size = chunk_size.pointer();
dev_node_chunk_array.chunk_element_first_idx = chunk_element_first_idx.pointer();
dev_node_chunk_array.node_aabb = node_aabb.dev_array();
dev_node_chunk_array.chunk_aabb = chunk_aabb.dev_array();
dev_node_chunk_array.parent_aabb = parent_aabb.dev_array();
dev_node_chunk_array.triangle_aabb = triangle_aabbs.dev_array();
return dev_node_chunk_array;
}
void
NodeChunkArray::tag_triangles_left_right(DevVector<int> & tags) {
dim3 grid(n_chunks(), 1, 1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::tag_triangles_left_right_kernel), dim3(grid), dim3(blocks), 0, 0, dev_array(),
n_elements(),
tags.pointer());
CUT_CHECK_ERROR("tag_triangles_left_right_kernel failed");
}
void
NodeChunkArray::element_clipping(DevVector<int> & split_axis,
DevVector<float> & split_pos,
TriangleArray & tris, int n_left) {
dim3 grid(n_chunks(), 1, 1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::element_clipping_kernel), dim3(grid), dim3(blocks), 0, 0, split_pos.size(),
dev_array(),
split_axis.pointer(),
split_pos.pointer(),
tris.dev_array(), n_left);
CUT_CHECK_ERROR("element_clipping_kernel failed");
}
void
NodeChunkArray::element_aabb_boundary_planes(DevVector<float> & boundaries,
DevVector<int> & dirs) {
dim3 grid(IntegerDivide(256)(n_nodes()), 1, 1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::element_aabb_boundary_planes_kernel), dim3(grid), dim3(blocks), 0, 0, dev_array(),
n_nodes(),
boundaries.pointer(),
dirs.pointer());
CUT_CHECK_ERROR("copy_aabb_boundaries_kernel failed");
}
void
NodeChunkArray::update_parent_aabbs(cukd::NodeChunkArray & active) {
dim3 grid(IntegerDivide(256)(active.n_nodes()),1,1);
dim3 blocks(256,1,1);
// FIXME: we have to pass active.n_nodes() separately, the value
// on the device is invalid for some reason!
hipLaunchKernelGGL(( device::update_parent_aabbs_kernel), dim3(grid),dim3(blocks), 0, 0, active.n_nodes(),
active.dev_array(),
dev_array());
CUT_CHECK_ERROR("update_parent_aabbs__kernel failed");
}
void
NodeChunkArray::determine_empty_space(int n_nodes, int dir,
DevAABBArray & parent_aabb,
DevAABBArray & node_aabb, int* cut_dir) {
dim3 grid(IntegerDivide(256)(n_nodes),1,1);
dim3 blocks(256,1,1);
int shared_size = 3*sizeof(int) + sizeof(float);
hipLaunchKernelGGL(( device::determine_empty_space_cut_kernel), dim3(grid),dim3(blocks),shared_size, 0,
dir, n_nodes,
parent_aabb, node_aabb,
cut_dir);
CUT_CHECK_ERROR("determine_empty_space_cut_kernel failed");
}
} // namespace cukd
| 0b469134d3d26747e6e0a0ad2518ac2d772f8efe.cu | // Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cukd/blob/master/LICENSE
#include <thrust/sequence.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
#include "node_chunk_array.h"
#include "../primitives.h"
#include "../utils.h"
#include "../algorithms/reduction.h"
namespace cukd {
namespace device {
__global__
void
create_chunks_kernel(device::NodeChunkArray arr, int* offsets, int n_nodes,
int max_chunk_size);
__global__
void
tag_triangles_left_right_kernel(device::NodeChunkArray active, int n_elements,
int * tag);
__global__
void
element_clipping_kernel(int n_parent_nodes, device::NodeChunkArray active,
int* p_split_axis, float* p_split_pos,
DevTriangleArray tris, int n_left);
__global__
void
update_parent_aabbs_kernel(int n_nodes, device::NodeChunkArray active,
device::NodeChunkArray next);
__global__
void
tag_triangles_by_node_tag_kernel(device::NodeChunkArray nca, int* node_tags,
int* tags);
__global__
void
element_aabb_boundary_planes_kernel(device::NodeChunkArray nca, int nca_n_nodes,
float* boundaries, int* dirs);
__global__
void
determine_empty_space_cut_kernel(int dir, int n_nodes,
DevAABBArray parent_aabb,
DevAABBArray node_aabb,
int* cut_dir);
} // namespace device
/**********************************************************************************
*
* NodeChunkArray implementation
*
**********************************************************************************/
typedef thrust::tuple<int, int, float, AABBArray::AABBTuple> NodeTuple;
typedef thrust::tuple<thrust::device_vector<int>::iterator,
thrust::device_vector<int>::iterator,
thrust::device_vector<float>::iterator,
thrust::device_vector<int>::iterator,
AABBArray::AABBIterator, AABBArray::AABBIterator> NodeIteratorTuple;
NodeChunkArray::NodeChunkArray() : n_chunks_(0) {}
void
NodeChunkArray::init_root_node(int n_elements, AABBArray & tri_aabbs,
const UAABB & root_aabb) {
resize_nodes(1);
resize_elements(n_elements);
tri_aabbs.copy(triangle_aabbs);
thrust::sequence(element_idx.begin(), element_idx.end());
node_size.set(0, n_elements_);
node_element_first_idx.set(0,0);
parent_aabb.minima.set(0, root_aabb.minimum);
parent_aabb.maxima.set(0, root_aabb.maximum);
depth.set(0,0);
}
void
NodeChunkArray::empty_space_tags(DevVector<int> & cut_dirs,
DevVector<int> & n_empty_nodes) {
// Determine if empty space cuts for the current nodes in
    // the active list are possible.
cut_dirs.resize(n_nodes_ + 1, 0);
n_empty_nodes.resize(n_nodes_ + 1);
DevAABBArray dev_parent_aabb = parent_aabb.dev_array();
DevAABBArray dev_node_aabb = node_aabb.dev_array();
for(int axis = 0; axis < 3; ++axis)
determine_empty_space(n_nodes_, axis,
dev_parent_aabb, dev_node_aabb,
cut_dirs.pointer());
// Compute the number of empty split nodes for each node.
// n_empty_nodes[n_nodes + 1] is the total number of empty split
// nodes at this pass.
CountBitsFunctor countbits;
thrust::exclusive_scan(
thrust::make_transform_iterator(cut_dirs.begin(), countbits),
thrust::make_transform_iterator(cut_dirs.end(), countbits),
n_empty_nodes.begin());
};
void
NodeChunkArray::divide_in_chunks() {
// compute the number of chunks for each node, such that each
// chunk holds at most MAX_ELEMENTS_PER_CHUNK elements
n_chunks_per_node.clear();
n_chunks_per_node.resize(n_nodes_);
thrust::transform(node_size.begin(),
node_size.begin() + n_nodes_,
n_chunks_per_node.begin(),
IntegerDivide(MAX_ELEMENTS_PER_CHUNK));
int n_new_chunks = thrust::reduce(n_chunks_per_node.begin(),
n_chunks_per_node.end());
resize_chunks(n_new_chunks);
// compute the indices to the element list per chunk
first_chunk_idx.clear();
first_chunk_idx.resize(n_nodes_);
thrust::exclusive_scan(n_chunks_per_node.begin(),
n_chunks_per_node.end(),
first_chunk_idx.begin());
dim3 blocks = dim3(256, 1, 1);
dim3 grid = dim3(IntegerDivide(blocks.x)(n_nodes_),1,1);
device::create_chunks_kernel<<<grid, blocks>>>(dev_array(),
first_chunk_idx.pointer(),
n_nodes_, MAX_ELEMENTS_PER_CHUNK);
CUT_CHECK_ERROR("create_chunks_kernel failed");
}
void
NodeChunkArray::chunk_node_reduce_aabbs() {
int* first_idx_ptr = chunk_element_first_idx.pointer();
int* size_ptr = chunk_size.pointer();
DevAABBArray tri_aabb = triangle_aabbs.dev_array();
DevAABBArray c_aabb = chunk_aabb.dev_array();
chunk_reduce<256, UFloat4, MinReductionMethod<UFloat4> >(
tri_aabb.minima, c_aabb.minima,
n_chunks_, first_idx_ptr, size_ptr);
chunk_reduce<256, UFloat4, MaxReductionMethod<UFloat4> >(
tri_aabb.maxima, c_aabb.maxima,
n_chunks_, first_idx_ptr, size_ptr);
out_keys.clear();
out_keys.resize(n_nodes());
thrust::equal_to<int> equal_keys;
thrust::reduce_by_key(node_idx.begin(), node_idx.begin() + n_chunks_,
chunk_aabb.begin(), out_keys.begin(),
node_aabb.begin(),
equal_keys, AABBArray::MinMax());
}
int
NodeChunkArray::append_by_tag(NodeChunkArray & nca, int new_nodes,
DevVector<int> & node_tags,
DevVector<int> & element_tags) {
element_tags.resize(nca.n_elements());
nca.divide_in_chunks();
nca.chunk_node_reduce_aabbs();
dim3 grid(nca.n_chunks(), 1, 1);
dim3 blocks(256,1,1);
device::tag_triangles_by_node_tag_kernel<<<grid, blocks>>>(nca.dev_array(),
node_tags.pointer(),
element_tags.pointer());
CUT_CHECK_ERROR("tag_triangles_by_node_tag_kernel failed");
int n_old_nodes = n_nodes();
int n_old_elements = n_elements();
resize_nodes(n_old_nodes + new_nodes);
// copy nodes
NodeIteratorTuple begin =
thrust::make_tuple(nca.node_size.begin(),
nca.split_axis.begin(),
nca.split_position.begin(),
nca.depth.begin(),
nca.parent_aabb.begin(),
nca.node_aabb.begin());
NodeIteratorTuple end =
thrust::make_tuple(nca.node_size.end(),
nca.split_axis.end(),
nca.split_position.end(),
nca.depth.end(),
nca.parent_aabb.end(),
nca.node_aabb.end());
NodeIteratorTuple result =
thrust::make_tuple(node_size.begin() + n_old_nodes,
split_axis.begin() + n_old_nodes,
split_position.begin() + n_old_nodes,
depth.begin() + n_old_nodes,
parent_aabb.begin() + n_old_nodes,
node_aabb.begin() + n_old_nodes);
IsNonZero<int> is_non_zero;
thrust::copy_if(thrust::make_zip_iterator(begin),
thrust::make_zip_iterator(end),
node_tags.begin(),
thrust::make_zip_iterator(result),
is_non_zero);
int new_elems = thrust::reduce(node_size.begin() + n_old_nodes, node_size.end());
resize_elements(n_old_elements + new_elems);
// now copy the elements
thrust::copy_if(
thrust::make_zip_iterator(thrust::make_tuple(nca.element_idx.begin(), nca.triangle_aabbs.begin())),
thrust::make_zip_iterator(thrust::make_tuple(nca.element_idx.end(), nca.triangle_aabbs.end())),
element_tags.begin(),
thrust::make_zip_iterator(thrust::make_tuple(element_idx.begin() + n_old_elements,
triangle_aabbs.begin() + n_old_elements)),
is_non_zero);
// nodes complete, compute first element indices
update_node_element_first_idx();
return new_elems;
}
void NodeChunkArray::remove_by_tag(DevVector<int> & node_tags,
DevVector<int> & element_tags,
int n_removed_nodes, int n_removed_elements) {
// remove nodes
NodeIteratorTuple begin =
thrust::make_tuple(node_size.begin(),
split_axis.begin(),
split_position.begin(),
depth.begin(),
parent_aabb.begin(),
node_aabb.begin());
NodeIteratorTuple end =
thrust::make_tuple(node_size.end(),
split_axis.end(),
split_position.end(),
depth.end(),
parent_aabb.end(),
node_aabb.end());
thrust::remove_if(thrust::make_zip_iterator(begin),
thrust::make_zip_iterator(end),
node_tags.begin(), IsNonZero<int>());
resize_nodes(node_size.size() - n_removed_nodes);
// remove elements
thrust::remove_if(thrust::make_zip_iterator(
thrust::make_tuple(element_idx.begin(), triangle_aabbs.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(element_idx.end(), triangle_aabbs.end())),
element_tags.begin(), IsNonZero<int>());
resize_elements(element_idx.size() - n_removed_elements);
update_node_element_first_idx();
}
void
NodeChunkArray::compact_elements_by_tag(DevVector<int> & tags, int start_tag_index,
int end_tag_index, int start_element_index,
NodeChunkArray & nca) {
typedef thrust::tuple<int, int, AABBArray::AABBTuple> TagElementAABBTuple;
typedef thrust::tuple<thrust::device_vector<int>::iterator,
thrust::device_vector<int>::iterator,
AABBArray::AABBIterator> IteratorTuple;
DevVector<int> temp;
temp.resize(tags.size());
FirstIsValue<TagElementAABBTuple, int> first_is_value(1);
IteratorTuple begin =
thrust::make_tuple(tags.begin() + start_tag_index,
element_idx.begin(),
triangle_aabbs.begin());
IteratorTuple end =
thrust::make_tuple(tags.begin() + end_tag_index,
element_idx.end(),
triangle_aabbs.end());
IteratorTuple result =
thrust::make_tuple(temp.begin(),
nca.element_idx.begin() + start_element_index,
nca.triangle_aabbs.begin() + start_element_index);
thrust::copy_if(
thrust::make_zip_iterator(begin), thrust::make_zip_iterator(end),
thrust::make_zip_iterator(result), first_is_value);
}
void NodeChunkArray::update_node_element_first_idx() {
thrust::exclusive_scan(node_size.begin(), node_size.end(),
node_element_first_idx.begin());
}
device::NodeChunkArray
NodeChunkArray::dev_array() {
device::NodeChunkArray dev_node_chunk_array;
dev_node_chunk_array.na = NodeArray::dev_array();
dev_node_chunk_array.node_idx = node_idx.pointer();
dev_node_chunk_array.chunk_size = chunk_size.pointer();
dev_node_chunk_array.chunk_element_first_idx = chunk_element_first_idx.pointer();
dev_node_chunk_array.node_aabb = node_aabb.dev_array();
dev_node_chunk_array.chunk_aabb = chunk_aabb.dev_array();
dev_node_chunk_array.parent_aabb = parent_aabb.dev_array();
dev_node_chunk_array.triangle_aabb = triangle_aabbs.dev_array();
return dev_node_chunk_array;
}
void
NodeChunkArray::tag_triangles_left_right(DevVector<int> & tags) {
dim3 grid(n_chunks(), 1, 1);
dim3 blocks(256,1,1);
device::tag_triangles_left_right_kernel<<<grid, blocks>>>(dev_array(),
n_elements(),
tags.pointer());
CUT_CHECK_ERROR("tag_triangles_left_right_kernel failed");
}
void
NodeChunkArray::element_clipping(DevVector<int> & split_axis,
DevVector<float> & split_pos,
TriangleArray & tris, int n_left) {
dim3 grid(n_chunks(), 1, 1);
dim3 blocks(256,1,1);
device::element_clipping_kernel<<<grid, blocks>>>(split_pos.size(),
dev_array(),
split_axis.pointer(),
split_pos.pointer(),
tris.dev_array(), n_left);
CUT_CHECK_ERROR("element_clipping_kernel failed");
}
void
NodeChunkArray::element_aabb_boundary_planes(DevVector<float> & boundaries,
DevVector<int> & dirs) {
dim3 grid(IntegerDivide(256)(n_nodes()), 1, 1);
dim3 blocks(256,1,1);
device::element_aabb_boundary_planes_kernel<<<grid, blocks>>>(dev_array(),
n_nodes(),
boundaries.pointer(),
dirs.pointer());
CUT_CHECK_ERROR("copy_aabb_boundaries_kernel failed");
}
void
NodeChunkArray::update_parent_aabbs(cukd::NodeChunkArray & active) {
dim3 grid(IntegerDivide(256)(active.n_nodes()),1,1);
dim3 blocks(256,1,1);
// FIXME: we have to pass active.n_nodes() separately, the value
// on the device is invalid for some reason!
device::update_parent_aabbs_kernel<<<grid,blocks>>>(active.n_nodes(),
active.dev_array(),
dev_array());
CUT_CHECK_ERROR("update_parent_aabbs__kernel failed");
}
void
NodeChunkArray::determine_empty_space(int n_nodes, int dir,
DevAABBArray & parent_aabb,
DevAABBArray & node_aabb, int* cut_dir) {
dim3 grid(IntegerDivide(256)(n_nodes),1,1);
dim3 blocks(256,1,1);
int shared_size = 3*sizeof(int) + sizeof(float);
device::determine_empty_space_cut_kernel<<<grid,blocks,shared_size>>>(
dir, n_nodes,
parent_aabb, node_aabb,
cut_dir);
CUT_CHECK_ERROR("determine_empty_space_cut_kernel failed");
}
} // namespace cukd
|
d61852a247032b90c6b4c5d1bca19ce11d7ba283.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void
Slice(const int nthreads, const Dtype *in_data, const bool forward,
const int num_slices, const int slice_size, const int bottom_slice_axis,
const int top_slice_axis, const int offset_slice_axis, Dtype *out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index =
slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
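    // index enumerates elements of the top (sliced) blob: slice_num locates the
    // position along the axes before the slice axis, slice_index the offset inside
    // one top slice. bottom_index maps that back into the bottom blob, shifted by
    // offset_slice_axis along the slice axis. E.g. slicing axis 1 of a 2x6xHxW blob
    // into two 2x3xHxW tops uses offset_slice_axis = 0 and 3 respectively.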
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
if (top.size() == 1) {
return;
}
int offset_slice_axis = 0;
const Dtype *bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype *top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((Slice<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
                       dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                       nthreads, bottom_data, kForward, num_slices_, slice_size_,
                       bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
if (!propagate_down[0] || top.size() == 1) {
return;
}
int offset_slice_axis = 0;
Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype *top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((Slice<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
                       dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                       nthreads, top_diff, kForward, num_slices_, slice_size_,
                       bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
| d61852a247032b90c6b4c5d1bca19ce11d7ba283.cu | #include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void
Slice(const int nthreads, const Dtype *in_data, const bool forward,
const int num_slices, const int slice_size, const int bottom_slice_axis,
const int top_slice_axis, const int offset_slice_axis, Dtype *out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index =
slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
if (top.size() == 1) {
return;
}
int offset_slice_axis = 0;
const Dtype *bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype *top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
if (!propagate_down[0] || top.size() == 1) {
return;
}
int offset_slice_axis = 0;
Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype *top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
|
188bea5f205d7f682e9164274072a6d05cc7fb5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VSIDS.cuh"
__global__ void vsids_test()
{
VSIDS vsids(10);
vsids.block_var(7);
Clause c;
create_clause_on_dev(2, c);
Lit l = mkLit(3, false);
addLitToDev(l, c);
Lit l2 = mkLit(8, true);
addLitToDev(l2, c);
for (int i = 0; i < 50; i++) {
vsids.handle_clause(c);
}
int total = 0;
int match = 0;
for (int i = 0; i < 5; i++) {
Lit lit = vsids.next_literal();
printf("Lit = ");
print_lit(lit);
printf("\n");
if (lit == l) {
match++;
}
total++;
vsids.print();
}
printf("There were %f%% of matches.\n", ((float)match * 100) / total);
printf("Done.\n");
}
int main_test()
{
hipLaunchKernelGGL(( vsids_test) , dim3(1), dim3(1), 0, 0, );
    hipDeviceReset();
    return 0;
}
| 188bea5f205d7f682e9164274072a6d05cc7fb5c.cu | #include "VSIDS.cuh"
__global__ void vsids_test()
{
VSIDS vsids(10);
vsids.block_var(7);
Clause c;
create_clause_on_dev(2, c);
Lit l = mkLit(3, false);
addLitToDev(l, c);
Lit l2 = mkLit(8, true);
addLitToDev(l2, c);
for (int i = 0; i < 50; i++) {
vsids.handle_clause(c);
}
int total = 0;
int match = 0;
for (int i = 0; i < 5; i++) {
Lit lit = vsids.next_literal();
printf("Lit = ");
print_lit(lit);
printf("\n");
if (lit == l) {
match++;
}
total++;
vsids.print();
}
printf("There were %f%% of matches.\n", ((float)match * 100) / total);
printf("Done.\n");
}
int main_test()
{
vsids_test <<< 1, 1>>>();
    cudaDeviceReset();
    return 0;
}
|
e57c3eb37edaf6d44ee54f4dc66d78adb0541a57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHTensorSort.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/binary_search.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
namespace {
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_mask_copy_kernel(
int64_t total_threads,
int64_t t_nnz,
const TensorInfo<int64_t, int64_t> t_indices_ti,
const TensorInfo<int64_t, int64_t> mask_indices_ti,
const TensorInfo<int64_t, int64_t> t_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> t_values_ti,
TensorInfo<scalar_t, int64_t> r_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = t_indices_pos_ti.data[i];
bool has_match = false;
if (j >= 0 && j < t_nnz && t_indices_ti.data[j] == mask_indices_ti.data[i]) {
has_match = true;
}
int64_t values_stride0 = r_values_ti.strides[0];
int64_t out_start = i * values_stride0;
int64_t out_end = (i + 1) * values_stride0;
int64_t in_start = j * t_values_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
r_values_ti.data[out_i] = t_values_ti.data[in_i];
}
}
}
} // end namespace
SparseTensor coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
if (self.is_coalesced()) {
return self;
}
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
Tensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
Tensor origIndices = at::empty({nnz}, self._indices().options());
Tensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data_ptr<int64_t>());
thrust_ptr origIndicesIter(origIndices.data_ptr<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data_ptr<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
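  // After unique_by_key, uniqueOffsets[i] holds the position (within the sorted order)
  // where the run of duplicates of the i-th distinct index begins, and origIndices maps
  // sorted positions back to the original nnz slots; coalesceValuesKernel uses both to
  // segment-reduce the corresponding value rows.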
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
// If there is no values to copy, save running the kernel.
if (newValues.numel() > 0) {
const int SZ = 4;
values = values.contiguous();
int64_t stride = at::prod_intlist(values.sizes().slice(1));
dim3 grid(THCCeilDiv(newNnz, (int64_t) SZ), THCCeilDiv(stride, (int64_t) C10_WARP_SIZE*SZ));
dim3 block(C10_WARP_SIZE, SZ);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, values.scalar_type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
hipLaunchKernelGGL(( apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t>), dim3(grid), dim3(block), 0, stream,
uniqueOffsets.data_ptr<int64_t>(),
origIndices.data_ptr<int64_t>(),
values.data_ptr<scalar_t>(),
newValues.data_ptr<scalar_t>(),
nnz,
newNnz,
stride
);
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
////////////////////////////////////////////////////////////
// unflatten indices if necessary
Tensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
Tensor indicesSlice = newIndices.narrow(0, d, 1);
// Note for the porting guide: THCTensor_(copy) does NOT do normal
// broadcasting logic; instead, it will blast the elements from one
// to the other so long as the numel is the same
indicesSlice.copy_(indices1D);
indices1D.floor_divide_(self.size(d));
indicesSlice.add_(indices1D, -self.size(d));
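      // Net effect per dimension d: indicesSlice = flattened % size(d), while
      // indices1D is left holding flattened / size(d) for the next (outer) dimension.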
}
}
////////////////////////////////////////////////////////////
// We can use unsafe sparse tensor constructor because the indices do not
// need to be revalidated as we do not add or change indices, just remove
// duplicates.
SparseTensor dst = ::at::native::_sparse_coo_tensor_unsafe(newIndices, newValues, self.sizes())._coalesced_(true);
THCudaCheck(hipGetLastError());
return dst;
}
Tensor sparse_mask_helper_cuda(
const SparseTensor& t,
const Tensor& mask_indices) {
/*
This is a helper function which filter values from `t._values()` using the
`mask_indices`. This CUDA implementation uses `thrust::lower_bound`
operation to find the intersection of the `mask_indices` and the
`t._indices()` to then filter the values.
Inputs:
`t` - coalesced sparse tensor input
`mask_indices` - mask indices tensor
    Note: The nnz of the output tensor will be the same as that of `mask_indices`, so this
    works correctly whether or not the mask is coalesced.
*/
TORCH_CHECK(t.is_sparse(), "t: input is not a sparse tensor");
TORCH_CHECK(t.is_coalesced(), "t: input is uncoalesced");
TORCH_CHECK(mask_indices.dim() == t._indices().dim(), "mask_indices: operands have incompatible indices dim; self has dim ",
t._indices().dim(), " but mask has dim ", mask_indices.dim());
TORCH_CHECK(mask_indices.is_contiguous(), "mask_indices: mask is not contiguous");
int64_t r_nnz = mask_indices.size(1);
auto t_values = t._values().contiguous();
auto full_size = t.sizes();
auto vsize = t_values.sizes().vec();
vsize[0] = r_nnz;
if (t.sparse_dim() == 0) {
Tensor t_values_expand = t_values;
t_values_expand = t_values_expand.expand(vsize).contiguous();
return t_values_expand;
}
Tensor r_values = at::zeros({vsize}, t_values.options());
auto t_indices = t._indices().contiguous();
auto t_nnz = t._nnz();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Step 1: flatten the sparse indices `t._indices()` tensor into a 1D indices
// tensor `t_flatten_indices`.
auto t_flatten_indices = at::sparse::flatten_indices(t_indices, full_size).contiguous();
// Step 2: flatten the sparse indices `mask_indices` tensor into a 1D indices
// tensor `mask_flatten_indices`. Note: This could be not sorted if the input
// indices in the constructor are not in a coalesced form
auto flattened_mask_indices =
at::sparse::flatten_indices(mask_indices, full_size);
Tensor t_indices_pos = at::empty({r_nnz}, mask_indices.options());
// Step 3: Match the flattened `mask_indices` with the flattened
// `t._indices()` using the `thrust::lower_bound`
thrust::lower_bound(
policy,
t_flatten_indices.data_ptr<int64_t>(),
t_flatten_indices.data_ptr<int64_t>() + t_nnz,
flattened_mask_indices.data_ptr<int64_t>(),
flattened_mask_indices.data_ptr<int64_t>() + r_nnz,
t_indices_pos.data_ptr<int64_t>());
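  // t_indices_pos[i] is the lower-bound position of the i-th mask index within the
  // sorted t indices; _sparse_mask_copy_kernel copies the value row only when the entry
  // at that position is an exact match, so unmatched mask entries keep the zeros that
  // r_values was initialized with.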
// Step 4: Copy the Filtered `t._values()` using the matches at `t_indices_pos`
if (r_nnz > 0 && t_values.numel() > 0) {
    int64_t block_size = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
auto grid_size = cuda::ATenCeilDiv(r_nnz, block_size);
auto t_indices_ti = getTensorInfo<int64_t, int64_t>(t_flatten_indices);
auto mask_indices_ti =
getTensorInfo<int64_t, int64_t>(flattened_mask_indices);
auto t_indices_pos_ti =
getTensorInfo<int64_t, int64_t>(t_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
r_values.scalar_type(), "sparse_mask_helper_cuda", [&] {
auto t_values_ti = getTensorInfo<scalar_t, int64_t>(t_values);
auto r_values_ti =
getTensorInfo<scalar_t, int64_t>(r_values);
hipLaunchKernelGGL(( _sparse_mask_copy_kernel<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
r_nnz,
t_nnz,
t_indices_ti,
mask_indices_ti,
t_indices_pos_ti,
t_values_ti,
r_values_ti);
});
}
return r_values;
}
}} // namespace at::native
| e57c3eb37edaf6d44ee54f4dc66d78adb0541a57.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCTensorSort.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/binary_search.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
namespace {
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_mask_copy_kernel(
int64_t total_threads,
int64_t t_nnz,
const TensorInfo<int64_t, int64_t> t_indices_ti,
const TensorInfo<int64_t, int64_t> mask_indices_ti,
const TensorInfo<int64_t, int64_t> t_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> t_values_ti,
TensorInfo<scalar_t, int64_t> r_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = t_indices_pos_ti.data[i];
bool has_match = false;
if (j >= 0 && j < t_nnz && t_indices_ti.data[j] == mask_indices_ti.data[i]) {
has_match = true;
}
int64_t values_stride0 = r_values_ti.strides[0];
int64_t out_start = i * values_stride0;
int64_t out_end = (i + 1) * values_stride0;
int64_t in_start = j * t_values_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
r_values_ti.data[out_i] = t_values_ti.data[in_i];
}
}
}
} // end namespace
SparseTensor coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
if (self.is_coalesced()) {
return self;
}
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
Tensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
Tensor origIndices = at::empty({nnz}, self._indices().options());
Tensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data_ptr<int64_t>());
thrust_ptr origIndicesIter(origIndices.data_ptr<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data_ptr<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
// If there is no values to copy, save running the kernel.
if (newValues.numel() > 0) {
const int SZ = 4;
values = values.contiguous();
int64_t stride = at::prod_intlist(values.sizes().slice(1));
dim3 grid(THCCeilDiv(newNnz, (int64_t) SZ), THCCeilDiv(stride, (int64_t) C10_WARP_SIZE*SZ));
dim3 block(C10_WARP_SIZE, SZ);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, values.scalar_type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t><<<grid, block, 0, stream>>>(
uniqueOffsets.data_ptr<int64_t>(),
origIndices.data_ptr<int64_t>(),
values.data_ptr<scalar_t>(),
newValues.data_ptr<scalar_t>(),
nnz,
newNnz,
stride
);
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
////////////////////////////////////////////////////////////
// unflatten indices if necessary
Tensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
Tensor indicesSlice = newIndices.narrow(0, d, 1);
// Note for the porting guide: THCTensor_(copy) does NOT do normal
// broadcasting logic; instead, it will blast the elements from one
// to the other so long as the numel is the same
indicesSlice.copy_(indices1D);
indices1D.floor_divide_(self.size(d));
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
// We can use unsafe sparse tensor constructor because the indices do not
// need to be revalidated as we do not add or change indices, just remove
// duplicates.
SparseTensor dst = ::at::native::_sparse_coo_tensor_unsafe(newIndices, newValues, self.sizes())._coalesced_(true);
THCudaCheck(cudaGetLastError());
return dst;
}
Tensor sparse_mask_helper_cuda(
const SparseTensor& t,
const Tensor& mask_indices) {
/*
This is a helper function which filter values from `t._values()` using the
`mask_indices`. This CUDA implementation uses `thrust::lower_bound`
operation to find the intersection of the `mask_indices` and the
`t._indices()` to then filter the values.
Inputs:
`t` - coalesced sparse tensor input
`mask_indices` - mask indices tensor
    Note: The nnz of the output tensor will be the same as that of `mask_indices`, so this
    works correctly whether or not the mask is coalesced.
*/
TORCH_CHECK(t.is_sparse(), "t: input is not a sparse tensor");
TORCH_CHECK(t.is_coalesced(), "t: input is uncoalesced");
TORCH_CHECK(mask_indices.dim() == t._indices().dim(), "mask_indices: operands have incompatible indices dim; self has dim ",
t._indices().dim(), " but mask has dim ", mask_indices.dim());
TORCH_CHECK(mask_indices.is_contiguous(), "mask_indices: mask is not contiguous");
int64_t r_nnz = mask_indices.size(1);
auto t_values = t._values().contiguous();
auto full_size = t.sizes();
auto vsize = t_values.sizes().vec();
vsize[0] = r_nnz;
if (t.sparse_dim() == 0) {
Tensor t_values_expand = t_values;
t_values_expand = t_values_expand.expand(vsize).contiguous();
return t_values_expand;
}
Tensor r_values = at::zeros({vsize}, t_values.options());
auto t_indices = t._indices().contiguous();
auto t_nnz = t._nnz();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Step 1: flatten the sparse indices `t._indices()` tensor into a 1D indices
// tensor `t_flatten_indices`.
auto t_flatten_indices = at::sparse::flatten_indices(t_indices, full_size).contiguous();
// Step 2: flatten the sparse indices `mask_indices` tensor into a 1D indices
// tensor `mask_flatten_indices`. Note: This could be not sorted if the input
// indices in the constructor are not in a coalesced form
auto flattened_mask_indices =
at::sparse::flatten_indices(mask_indices, full_size);
Tensor t_indices_pos = at::empty({r_nnz}, mask_indices.options());
// Step 3: Match the flattened `mask_indices` with the flattened
// `t._indices()` using the `thrust::lower_bound`
thrust::lower_bound(
policy,
t_flatten_indices.data_ptr<int64_t>(),
t_flatten_indices.data_ptr<int64_t>() + t_nnz,
flattened_mask_indices.data_ptr<int64_t>(),
flattened_mask_indices.data_ptr<int64_t>() + r_nnz,
t_indices_pos.data_ptr<int64_t>());
// Step 4: Copy the Filtered `t._values()` using the matches at `t_indices_pos`
if (r_nnz > 0 && t_values.numel() > 0) {
int64_t block_size = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
auto grid_size = cuda::ATenCeilDiv(r_nnz, block_size);
auto t_indices_ti = getTensorInfo<int64_t, int64_t>(t_flatten_indices);
auto mask_indices_ti =
getTensorInfo<int64_t, int64_t>(flattened_mask_indices);
auto t_indices_pos_ti =
getTensorInfo<int64_t, int64_t>(t_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
r_values.scalar_type(), "sparse_mask_helper_cuda", [&] {
auto t_values_ti = getTensorInfo<scalar_t, int64_t>(t_values);
auto r_values_ti =
getTensorInfo<scalar_t, int64_t>(r_values);
_sparse_mask_copy_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
r_nnz,
t_nnz,
t_indices_ti,
mask_indices_ti,
t_indices_pos_ti,
t_values_ti,
r_values_ti);
});
}
return r_values;
}
}} // namespace at::native
|
25cbae821ad8a62e5c7a26d12d896c3080b62719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
#define IDX2C(i, j, ld) ((j)*(ld)+(i))
#define SQR(x) ((x)*(x)) // x^2
__global__ void multiply_arrays(double* signals, double const* weights){
signals[blockIdx.x * blockDim.x + threadIdx.x] *= weights[blockIdx.x * blockDim.x + threadIdx.x];
} | 25cbae821ad8a62e5c7a26d12d896c3080b62719.cu | #include "includes.h"
extern "C" {
}
#define IDX2C(i, j, ld) ((j)*(ld)+(i))
#define SQR(x) ((x)*(x)) // x^2
__global__ void multiply_arrays(double* signals, double const* weights){
signals[blockIdx.x * blockDim.x + threadIdx.x] *= weights[blockIdx.x * blockDim.x + threadIdx.x];
} |
d9f6f2ef5f7ab745ea8bc1adb154c467e9aaa1dd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithms.hpp>
#include <experimental/graph_view.hpp>
#include <patterns/copy_to_adj_matrix_row_col.cuh>
#include <patterns/count_if_e.cuh>
#include <patterns/reduce_op.cuh>
#include <patterns/transform_reduce_e.cuh>
#include <patterns/update_frontier_v_push_if_out_nbr.cuh>
#include <patterns/vertex_frontier.cuh>
#include <utilities/error.hpp>
#include <vertex_partition_device.cuh>
#include <raft/cudart_utils.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
namespace cugraph {
namespace experimental {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void sssp(raft::handle_t const &handle,
GraphViewType const &push_graph_view,
typename GraphViewType::weight_type *distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
typename GraphViewType::weight_type cutoff,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
using weight_t = typename GraphViewType::weight_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
auto const num_edges = push_graph_view.get_number_of_edges();
if (num_vertices == 0) { return; }
// implements the Near-Far Pile method in
// A. Davidson, S. Baxter, M. Garland, and J. D. Owens, "Work-efficient parallel GPU methods for
// single-source shortest paths," 2014.
// 1. check input arguments
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
if (do_expensive_check) {
auto num_negative_edge_weights =
count_if_e(handle,
push_graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return w < 0.0;
});
CUGRAPH_EXPECTS(num_negative_edge_weights == 0,
"Invalid input argument: input graph should have non-negative edge weights.");
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<weight_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = weight_t{0.0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
if (num_edges == 0) { return; }
// 3. update delta
weight_t average_vertex_degree{0.0};
weight_t average_edge_weight{0.0};
thrust::tie(average_vertex_degree, average_edge_weight) = transform_reduce_e(
handle,
push_graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t row, vertex_t col, weight_t w, auto row_val, auto col_val) {
return thrust::make_tuple(weight_t{1.0}, w);
},
thrust::make_tuple(weight_t{0.0}, weight_t{0.0}));
average_vertex_degree /= static_cast<weight_t>(num_vertices);
average_edge_weight /= static_cast<weight_t>(num_edges);
auto delta =
(static_cast<weight_t>(raft::warp_size()) * average_edge_weight) / average_vertex_degree;
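  // delta controls the width of each near/far band. This heuristic (scaling the mean
  // edge weight by warp_size / mean degree, cf. the Davidson et al. reference above)
  // only affects how work is batched per iteration, not the correctness of the result.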
// 4. initialize SSSP frontier
enum class Bucket { cur_near, new_near, far, num_buckets };
// FIXME: need to double check the bucket sizes are sufficient
std::vector<size_t> bucket_sizes(static_cast<size_t>(Bucket::num_buckets),
push_graph_view.get_number_of_local_vertices());
VertexFrontier<thrust::tuple<weight_t, vertex_t>,
vertex_t,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle, bucket_sizes);
// 5. SSSP iteration
bool vertex_and_adj_matrix_row_ranges_coincide =
push_graph_view.get_number_of_local_vertices() ==
push_graph_view.get_number_of_local_adj_matrix_partition_rows()
? true
: false;
rmm::device_uvector<weight_t> adj_matrix_row_distances(0, handle.get_stream());
if (!vertex_and_adj_matrix_row_ranges_coincide) {
adj_matrix_row_distances.resize(push_graph_view.get_number_of_local_adj_matrix_partition_rows(),
handle.get_stream());
thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
adj_matrix_row_distances.begin(),
adj_matrix_row_distances.end(),
std::numeric_limits<weight_t>::max());
}
auto row_distances =
!vertex_and_adj_matrix_row_ranges_coincide ? adj_matrix_row_distances.data() : distances;
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).insert(source_vertex);
}
auto near_far_threshold = delta;
while (true) {
if (!vertex_and_adj_matrix_row_ranges_coincide) {
copy_to_adj_matrix_row(
handle,
push_graph_view,
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).begin(),
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).end(),
distances,
row_distances);
}
vertex_partition_device_t<GraphViewType> vertex_partition(push_graph_view);
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).begin(),
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).end(),
row_distances,
thrust::make_constant_iterator(0) /* dummy */,
[vertex_partition, distances, cutoff] __device__(
vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
auto push = true;
auto new_distance = src_val + w;
auto threshold = cutoff;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto local_vertex_offset =
vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst);
auto old_distance = *(distances + local_vertex_offset);
threshold = old_distance < threshold ? old_distance : threshold;
}
if (new_distance >= threshold) { push = false; }
return thrust::make_tuple(push, new_distance, src);
},
reduce_op::min<thrust::tuple<weight_t, vertex_t>>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
vertex_frontier,
[near_far_threshold] __device__(auto v_val, auto pushed_val) {
auto new_dist = thrust::get<0>(pushed_val);
auto idx = new_dist < v_val
? (new_dist < near_far_threshold ? static_cast<size_t>(Bucket::new_near)
: static_cast<size_t>(Bucket::far))
: VertexFrontier<thrust::tuple<vertex_t>, vertex_t>::kInvalidBucketIdx;
return thrust::make_tuple(idx, thrust::get<0>(pushed_val), thrust::get<1>(pushed_val));
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).clear();
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::new_near)).aggregate_size() > 0) {
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur_near),
static_cast<size_t>(Bucket::new_near));
} else if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::far)).aggregate_size() >
0) { // near queue is empty, split the far queue
auto old_near_far_threshold = near_far_threshold;
near_far_threshold += delta;
size_t new_near_size{0};
size_t new_far_size{0};
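      // Keep widening the near/far threshold by delta until the split yields at
      // least one near vertex or the far bucket is fully drained.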
while (true) {
vertex_frontier.split_bucket(
static_cast<size_t>(Bucket::far),
[vertex_partition, distances, old_near_far_threshold, near_far_threshold] __device__(
auto v) {
auto dist =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v));
if (dist < old_near_far_threshold) {
return VertexFrontier<thrust::tuple<vertex_t>, vertex_t>::kInvalidBucketIdx;
} else if (dist < near_far_threshold) {
return static_cast<size_t>(Bucket::cur_near);
} else {
return static_cast<size_t>(Bucket::far);
}
});
new_near_size =
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).aggregate_size();
new_far_size =
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::far)).aggregate_size();
if ((new_near_size > 0) || (new_far_size == 0)) {
break;
} else {
near_far_threshold += delta;
}
}
if ((new_near_size == 0) && (new_far_size == 0)) { break; }
} else {
break;
}
}
CUDA_TRY(hipStreamSynchronize(
    handle.get_stream())); // this is necessary as vertex_frontier will become out-of-scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void sssp(raft::handle_t const &handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const &graph_view,
weight_t *distances,
vertex_t *predecessors,
vertex_t source_vertex,
weight_t cutoff,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::sssp(
handle, graph_view, distances, predecessors, source_vertex, cutoff, do_expensive_check);
} else {
detail::sssp(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
cutoff,
do_expensive_check);
}
}
// explicit instantiation
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, float, false, true> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, double, false, true> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, float, false, true> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, double, false, true> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, float, false, true> const &graph_view,
float *distances,
int64_t *predecessors,
int64_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, double, false, true> const &graph_view,
double *distances,
int64_t *predecessors,
int64_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, float, false, false> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, double, false, false> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, float, false, false> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, double, false, false> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, float, false, false> const &graph_view,
float *distances,
int64_t *predecessors,
int64_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, double, false, false> const &graph_view,
double *distances,
int64_t *predecessors,
int64_t source_vertex,
double cutoff,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
| d9f6f2ef5f7ab745ea8bc1adb154c467e9aaa1dd.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithms.hpp>
#include <experimental/graph_view.hpp>
#include <patterns/copy_to_adj_matrix_row_col.cuh>
#include <patterns/count_if_e.cuh>
#include <patterns/reduce_op.cuh>
#include <patterns/transform_reduce_e.cuh>
#include <patterns/update_frontier_v_push_if_out_nbr.cuh>
#include <patterns/vertex_frontier.cuh>
#include <utilities/error.hpp>
#include <vertex_partition_device.cuh>
#include <raft/cudart_utils.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
namespace cugraph {
namespace experimental {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void sssp(raft::handle_t const &handle,
GraphViewType const &push_graph_view,
typename GraphViewType::weight_type *distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
typename GraphViewType::weight_type cutoff,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
using weight_t = typename GraphViewType::weight_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
auto const num_edges = push_graph_view.get_number_of_edges();
if (num_vertices == 0) { return; }
// implements the Near-Far Pile method in
// A. Davidson, S. Baxter, M. Garland, and J. D. Owens, "Work-efficient parallel GPU methods for
// single-source shortest paths," 2014.
// 1. check input arguments
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
if (do_expensive_check) {
auto num_negative_edge_weights =
count_if_e(handle,
push_graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return w < 0.0;
});
CUGRAPH_EXPECTS(num_negative_edge_weights == 0,
"Invalid input argument: input graph should have non-negative edge weights.");
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<weight_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = weight_t{0.0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
if (num_edges == 0) { return; }
// 3. update delta
weight_t average_vertex_degree{0.0};
weight_t average_edge_weight{0.0};
thrust::tie(average_vertex_degree, average_edge_weight) = transform_reduce_e(
handle,
push_graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t row, vertex_t col, weight_t w, auto row_val, auto col_val) {
return thrust::make_tuple(weight_t{1.0}, w);
},
thrust::make_tuple(weight_t{0.0}, weight_t{0.0}));
average_vertex_degree /= static_cast<weight_t>(num_vertices);
average_edge_weight /= static_cast<weight_t>(num_edges);
auto delta =
(static_cast<weight_t>(raft::warp_size()) * average_edge_weight) / average_vertex_degree;
// 4. initialize SSSP frontier
enum class Bucket { cur_near, new_near, far, num_buckets };
// FIXME: need to double check the bucket sizes are sufficient
std::vector<size_t> bucket_sizes(static_cast<size_t>(Bucket::num_buckets),
push_graph_view.get_number_of_local_vertices());
VertexFrontier<thrust::tuple<weight_t, vertex_t>,
vertex_t,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle, bucket_sizes);
// 5. SSSP iteration
  bool vertex_and_adj_matrix_row_ranges_coincide =
    push_graph_view.get_number_of_local_vertices() ==
    push_graph_view.get_number_of_local_adj_matrix_partition_rows();
rmm::device_uvector<weight_t> adj_matrix_row_distances(0, handle.get_stream());
if (!vertex_and_adj_matrix_row_ranges_coincide) {
adj_matrix_row_distances.resize(push_graph_view.get_number_of_local_adj_matrix_partition_rows(),
handle.get_stream());
thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
adj_matrix_row_distances.begin(),
adj_matrix_row_distances.end(),
std::numeric_limits<weight_t>::max());
}
auto row_distances =
!vertex_and_adj_matrix_row_ranges_coincide ? adj_matrix_row_distances.data() : distances;
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).insert(source_vertex);
}
auto near_far_threshold = delta;
while (true) {
if (!vertex_and_adj_matrix_row_ranges_coincide) {
copy_to_adj_matrix_row(
handle,
push_graph_view,
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).begin(),
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).end(),
distances,
row_distances);
}
vertex_partition_device_t<GraphViewType> vertex_partition(push_graph_view);
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).begin(),
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).end(),
row_distances,
thrust::make_constant_iterator(0) /* dummy */,
[vertex_partition, distances, cutoff] __device__(
vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
auto push = true;
auto new_distance = src_val + w;
auto threshold = cutoff;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto local_vertex_offset =
vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst);
auto old_distance = *(distances + local_vertex_offset);
threshold = old_distance < threshold ? old_distance : threshold;
}
if (new_distance >= threshold) { push = false; }
return thrust::make_tuple(push, new_distance, src);
},
reduce_op::min<thrust::tuple<weight_t, vertex_t>>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
vertex_frontier,
[near_far_threshold] __device__(auto v_val, auto pushed_val) {
auto new_dist = thrust::get<0>(pushed_val);
auto idx = new_dist < v_val
? (new_dist < near_far_threshold ? static_cast<size_t>(Bucket::new_near)
: static_cast<size_t>(Bucket::far))
: VertexFrontier<thrust::tuple<vertex_t>, vertex_t>::kInvalidBucketIdx;
return thrust::make_tuple(idx, thrust::get<0>(pushed_val), thrust::get<1>(pushed_val));
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).clear();
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::new_near)).aggregate_size() > 0) {
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur_near),
static_cast<size_t>(Bucket::new_near));
} else if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::far)).aggregate_size() >
0) { // near queue is empty, split the far queue
auto old_near_far_threshold = near_far_threshold;
near_far_threshold += delta;
size_t new_near_size{0};
size_t new_far_size{0};
while (true) {
vertex_frontier.split_bucket(
static_cast<size_t>(Bucket::far),
[vertex_partition, distances, old_near_far_threshold, near_far_threshold] __device__(
auto v) {
auto dist =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v));
if (dist < old_near_far_threshold) {
return VertexFrontier<thrust::tuple<vertex_t>, vertex_t>::kInvalidBucketIdx;
} else if (dist < near_far_threshold) {
return static_cast<size_t>(Bucket::cur_near);
} else {
return static_cast<size_t>(Bucket::far);
}
});
new_near_size =
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).aggregate_size();
new_far_size =
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::far)).aggregate_size();
if ((new_near_size > 0) || (new_far_size == 0)) {
break;
} else {
near_far_threshold += delta;
}
}
if ((new_near_size == 0) && (new_far_size == 0)) { break; }
} else {
break;
}
}
CUDA_TRY(cudaStreamSynchronize(
    handle.get_stream())); // this is necessary as vertex_frontier will become out-of-scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void sssp(raft::handle_t const &handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const &graph_view,
weight_t *distances,
vertex_t *predecessors,
vertex_t source_vertex,
weight_t cutoff,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::sssp(
handle, graph_view, distances, predecessors, source_vertex, cutoff, do_expensive_check);
} else {
detail::sssp(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
cutoff,
do_expensive_check);
}
}
// explicit instantiation
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, float, false, true> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, double, false, true> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, float, false, true> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, double, false, true> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, float, false, true> const &graph_view,
float *distances,
int64_t *predecessors,
int64_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, double, false, true> const &graph_view,
double *distances,
int64_t *predecessors,
int64_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, float, false, false> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int32_t, double, false, false> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, float, false, false> const &graph_view,
float *distances,
int32_t *predecessors,
int32_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int32_t, int64_t, double, false, false> const &graph_view,
double *distances,
int32_t *predecessors,
int32_t source_vertex,
double cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, float, false, false> const &graph_view,
float *distances,
int64_t *predecessors,
int64_t source_vertex,
float cutoff,
bool do_expensive_check);
template void sssp(raft::handle_t const &handle,
graph_view_t<int64_t, int64_t, double, false, false> const &graph_view,
double *distances,
int64_t *predecessors,
int64_t source_vertex,
double cutoff,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
066f7db04108d135c8d1f2fac8ab815c29829c0a.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
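// The aliases above configure a Turing (Sm75) tensor-core int8 forward-convolution
// kernel: 32-element interleaved activation/filter layouts (NCxHWx<32> / CxRSKx<32>),
// an NCxHWx<4> output layout, 64x128x64 threadblock tiles, 32x64x64 warp tiles,
// an 8x8x16 MMA shape, and a fused bias + ReLU + clamp epilogue.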
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 066f7db04108d135c8d1f2fac8ab815c29829c0a.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
95059bf32d33596e49455f189dee2993c751dc25.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHThrustAllocator.cuh"
#include "THHApply.cuh"
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/hip/execution_policy.h>
template <typename T>
inline __host__ __device__ T eps();
template <>
inline __host__ __device__ float eps() { return 1e-12f; }
template <>
inline __host__ __device__ double eps() { return 1e-12; }
template <typename T>
inline __host__ __device__ T safe_log(T a) {
if (a == 0.)
{
return THCNumerics<T>::log(eps<T>());
}
return THCNumerics<T>::log(a);
}
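// Clamping the argument to eps keeps log(0) from producing -inf, which would
// otherwise poison the loss reduction when a prediction saturates at 0 or 1.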
template <typename Dtype, typename Acctype>
struct bce_functor
{
template <class Tuple>
__host__ __device__
Acctype operator()(Tuple x)
{
Dtype input = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
assert(input >= 0. && input <= 1.);
return - (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input))
+ (Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input));
}
};
template <typename Dtype, typename Acctype>
struct bce_updateOutput_no_reduce_functor
{
__forceinline__ __host__ __device__
void operator()(
const Dtype *input,
const Dtype *target,
Dtype *output)
{
assert(*input >= 0. && *input <= 1.);
*output = ScalarConvert<Acctype, Dtype>::to(
-(*target * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(*input)) +
(Acctype(1) - *target) * safe_log<Acctype>(Acctype(1) - *input)));
}
};
template <typename Dtype, typename Acctype>
struct bce_functor_weights
{
template <class Tuple>
__host__ __device__
Acctype operator()(Tuple x)
{
Dtype input = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
Dtype w = thrust::get<2>(x);
assert(input >= 0. && input <= 1.);
return - w * (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input)) +
(Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input));
}
};
template <typename Dtype, typename Acctype>
struct bce_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__
void operator()(
const Dtype *x,
const Dtype *t,
Dtype *gradInput)
{
*gradInput = ScalarConvert<Acctype,Dtype>::to(
- (*t - *x) / ((Acctype(1) - *x + eps<Acctype>()) * (*x + eps<Acctype>())));
}
};
template <typename Dtype, typename Acctype>
struct bce_updateGradInput_functor
{
const Dtype norm;
bce_updateGradInput_functor(Dtype norm_)
: norm(norm_)
{}
template <class Tuple>
__host__ __device__
Dtype operator()(Tuple x)
{
Dtype o = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
return ScalarConvert<Acctype,Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm);
}
};
template <typename Dtype, typename Acctype>
struct bce_updateGradInput_functor_weights
{
const Dtype norm;
bce_updateGradInput_functor_weights(Dtype norm_)
: norm(norm_)
{}
template <class Tuple>
__host__ __device__
Dtype operator()(Tuple x)
{
Dtype o = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
Dtype w = thrust::get<2>(x);
return ScalarConvert<Acctype, Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm * w);
}
};
#include "generic/BCECriterion.cu"
#include "THHGenerateFloatTypes.h"
| 95059bf32d33596e49455f189dee2993c751dc25.cu | #include "THCUNN.h"
#include "common.h"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCThrustAllocator.cuh"
#include "THCApply.cuh"
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/cuda/execution_policy.h>
template <typename T>
inline __host__ __device__ T eps();
template <>
inline __host__ __device__ float eps() { return 1e-12f; }
template <>
inline __host__ __device__ double eps() { return 1e-12; }
template <typename T>
inline __host__ __device__ T safe_log(T a) {
if (a == 0.)
{
return THCNumerics<T>::log(eps<T>());
}
return THCNumerics<T>::log(a);
}
template <typename Dtype, typename Acctype>
struct bce_functor
{
template <class Tuple>
__host__ __device__
Acctype operator()(Tuple x)
{
Dtype input = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
assert(input >= 0. && input <= 1.);
return - (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input))
+ (Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input));
}
};
template <typename Dtype, typename Acctype>
struct bce_updateOutput_no_reduce_functor
{
__forceinline__ __host__ __device__
void operator()(
const Dtype *input,
const Dtype *target,
Dtype *output)
{
assert(*input >= 0. && *input <= 1.);
*output = ScalarConvert<Acctype, Dtype>::to(
-(*target * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(*input)) +
(Acctype(1) - *target) * safe_log<Acctype>(Acctype(1) - *input)));
}
};
template <typename Dtype, typename Acctype>
struct bce_functor_weights
{
template <class Tuple>
__host__ __device__
Acctype operator()(Tuple x)
{
Dtype input = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
Dtype w = thrust::get<2>(x);
assert(input >= 0. && input <= 1.);
return - w * (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input)) +
(Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input));
}
};
template <typename Dtype, typename Acctype>
struct bce_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__
void operator()(
const Dtype *x,
const Dtype *t,
Dtype *gradInput)
{
*gradInput = ScalarConvert<Acctype,Dtype>::to(
- (*t - *x) / ((Acctype(1) - *x + eps<Acctype>()) * (*x + eps<Acctype>())));
}
};
template <typename Dtype, typename Acctype>
struct bce_updateGradInput_functor
{
const Dtype norm;
bce_updateGradInput_functor(Dtype norm_)
: norm(norm_)
{}
template <class Tuple>
__host__ __device__
Dtype operator()(Tuple x)
{
Dtype o = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
return ScalarConvert<Acctype,Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm);
}
};
template <typename Dtype, typename Acctype>
struct bce_updateGradInput_functor_weights
{
const Dtype norm;
bce_updateGradInput_functor_weights(Dtype norm_)
: norm(norm_)
{}
template <class Tuple>
__host__ __device__
Dtype operator()(Tuple x)
{
Dtype o = thrust::get<0>(x);
Dtype t = thrust::get<1>(x);
Dtype w = thrust::get<2>(x);
return ScalarConvert<Acctype, Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm * w);
}
};
#include "generic/BCECriterion.cu"
#include "THCGenerateFloatTypes.h"
|
fd524bbf6e952b525d1d7f97f005faaf1f27ecb2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_sobel.cuh"
#include "parameters.cuh"
__global__ void gpu_sobel(u_char *Source, u_char *Resultat, unsigned int height, unsigned int width) {
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
u_char val;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {Resultat[globalIndex]=0;}
else {
val = std::abs(Source[(i-1)*width+(j-1)] + Source[(i-1)*width+(j)] + Source[(i-1)*width+(j+1)] -\
(Source[(i+1)*width+(j-1)] + Source[(i+1)*width+(j)] + Source[(i+1)*width+(j+1)]));
Resultat[globalIndex] = val + std::abs(Source[(i-1)*width+(j-1)] + Source[(i)*width+(j-1)] + Source[(i+1)*width+(j-1)] -\
(Source[(i-1)*width+(j+1)] + Source[(i)*width+(j+1)] + Source[(i+1)*width+(j+1)]));
}
}
__global__ void gpu_sobel_shared(u_char *Source, u_char *Resultat, unsigned int height, unsigned int width) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y-2) + y;
int j = blockIdx.x*(BLOCKDIM_X-2) + x;
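    // Blocks advance by BLOCKDIM-2 in each dimension: border threads only stage
    // the one-pixel halo into shared memory, and the 3x3 stencil below is
    // evaluated by the interior (BLOCKDIM_X-2) x (BLOCKDIM_Y-2) threads only.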
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {}
else {
//mainstream
tuile[x][y] = Source[globalIndex];
__syncthreads();
u_char val;
if ((x>0)&&(y>0)&&(x<BLOCKDIM_X-1)&&(y<BLOCKDIM_Y-1)) {
val = std::abs(tuile[x-1][y-1] + tuile[x-1][y] + tuile[x-1][y+1] -\
(tuile[x+1][y-1] + tuile[x+1][y] + tuile[x+1][y+1]));
Resultat[globalIndex] = val + std::abs(tuile[x-1][y-1] + tuile[x][y-1] + tuile[x+1][y-1] -\
(tuile[x-1][y+1] + tuile[x][y+1] + tuile[x+1][y+1]));
}
}
} | fd524bbf6e952b525d1d7f97f005faaf1f27ecb2.cu | #include "gpu_sobel.cuh"
#include "parameters.cuh"
__global__ void gpu_sobel(u_char *Source, u_char *Resultat, unsigned int height, unsigned int width) {
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
u_char val;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {Resultat[globalIndex]=0;}
else {
val = std::abs(Source[(i-1)*width+(j-1)] + Source[(i-1)*width+(j)] + Source[(i-1)*width+(j+1)] -\
(Source[(i+1)*width+(j-1)] + Source[(i+1)*width+(j)] + Source[(i+1)*width+(j+1)]));
Resultat[globalIndex] = val + std::abs(Source[(i-1)*width+(j-1)] + Source[(i)*width+(j-1)] + Source[(i+1)*width+(j-1)] -\
(Source[(i-1)*width+(j+1)] + Source[(i)*width+(j+1)] + Source[(i+1)*width+(j+1)]));
}
}
__global__ void gpu_sobel_shared(u_char *Source, u_char *Resultat, unsigned int height, unsigned int width) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y-2) + y;
int j = blockIdx.x*(BLOCKDIM_X-2) + x;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {}
else {
//mainstream
tuile[x][y] = Source[globalIndex];
__syncthreads();
u_char val;
if ((x>0)&&(y>0)&&(x<BLOCKDIM_X-1)&&(y<BLOCKDIM_Y-1)) {
val = std::abs(tuile[x-1][y-1] + tuile[x-1][y] + tuile[x-1][y+1] -\
(tuile[x+1][y-1] + tuile[x+1][y] + tuile[x+1][y+1]));
Resultat[globalIndex] = val + std::abs(tuile[x-1][y-1] + tuile[x][y-1] + tuile[x+1][y-1] -\
(tuile[x-1][y+1] + tuile[x][y+1] + tuile[x+1][y+1]));
}
}
} |
5efb6df829756f9e35b1f28a4151c09030675942.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA Betweenness Centr kernel
// Topological-Driven: one node per thread, thread_centric,
// no atomicAdd instructions
//
// Reference:
// A. E. Sariyuce, et al. Betweenness Centrality on GPUs and
// Heterogeneous Architectures
//=================================================================//
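// Per source vertex the computation runs in three stages: a level-synchronous
// forward BFS that counts shortest paths (sigma), a backward sweep over the BFS
// levels that accumulates dependencies (delta), and a final pass adding the delta
// of every reachable non-root vertex into its BC score.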
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
__global__ void kernel_init(unsigned * d_dist, unsigned * d_sigma,
float * d_delta, uint64_t num_vertex, uint64_t root)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_dist[tid] = MY_INFINITY;
d_sigma[tid] = 0;
d_delta[tid] = 0;
if (tid==root)
{
d_dist[tid]=0;
d_sigma[tid]=1;
}
}
}
__global__ void kernel_forward_phase(cudaGraph graph,
unsigned * d_dist, unsigned * d_sigma,
bool * d_over, unsigned curr)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (d_dist[tid] != curr) return;
uint64_t u = tid;
uint64_t start, end;
start = graph.get_firstedge_index(u);
end = graph.get_edge_index_end(u);
for (uint64_t i=start; i<end; i++)
{
uint64_t w = graph.get_edge_dest(i);
if (d_dist[w] == MY_INFINITY)
{
d_dist[w] = curr+1;
*d_over = false;
}
if (d_dist[w] == (curr+1))
{
atomicAdd(&(d_sigma[w]), d_sigma[u]);
}
}
}
__global__ void kernel_backward_phase(cudaGraph graph,
unsigned * d_dist, unsigned * d_sigma,
float * d_delta, unsigned curr)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (d_dist[tid] != (curr-1)) return;
uint64_t u = tid;
float sum = 0;
uint64_t start, end;
start = graph.get_firstedge_index(u);
end = graph.get_edge_index_end(u);
for (uint64_t i=start; i<end; i++)
{
uint64_t w = graph.get_edge_dest(i);
if (d_dist[w] == curr)
{
sum += 1.0*d_sigma[u]/d_sigma[w]*(1.0+d_delta[w]);
}
}
d_delta[u] += sum;
}
__global__ void kernel_backsum_phase(cudaGraph graph,
float * d_BC, float * d_delta,
unsigned * d_dist, uint64_t root)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (tid == root) return;
if (d_dist[tid] == MY_INFINITY) return;
d_BC[tid] += d_delta[tid];
}
void cuda_betweenness_centr(uint64_t * vertexlist,
uint64_t * edgelist, float * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt)
{
float * device_BC = 0, * device_delta = 0;
unsigned * device_dist = 0, * device_sigma = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_BC, vertex_cnt*sizeof(float)) );
cudaErrCheck( hipMemset(device_BC, 0, vertex_cnt*sizeof(float)) );
cudaErrCheck( hipMalloc((void**)&device_delta, vertex_cnt*sizeof(float)) );
cudaErrCheck( hipMalloc((void**)&device_dist, vertex_cnt*sizeof(unsigned)) );
cudaErrCheck( hipMalloc((void**)&device_sigma, vertex_cnt*sizeof(unsigned)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
hipEventRecord(start_event, 0);
for (unsigned root=0;root<vertex_cnt;root++)
{
hipLaunchKernelGGL(( kernel_init), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_dist, device_sigma,
device_delta, vertex_cnt, root);
bool stop;
unsigned curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = true;
cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( kernel_forward_phase), dim3(num_block), dim3(num_thread_per_block), 0, 0, d_graph, device_dist, device_sigma,
device_over, curr);
cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) );
curr++;
}while(!stop);
while(curr>1)
{
curr--;
hipLaunchKernelGGL(( kernel_backward_phase), dim3(num_block), dim3(num_thread_per_block), 0, 0, d_graph, device_dist, device_sigma,
device_delta, curr);
}
hipLaunchKernelGGL(( kernel_backsum_phase), dim3(num_block), dim3(num_thread_per_block), 0, 0, d_graph, device_BC, device_delta,
device_dist, root);
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
    cudaErrCheck( hipMemcpy(vproplist, device_BC, vertex_cnt*sizeof(float),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( hipFree(device_BC) );
cudaErrCheck( hipFree(device_delta) );
cudaErrCheck( hipFree(device_sigma) );
cudaErrCheck( hipFree(device_dist) );
cudaErrCheck( hipFree(device_over) );
}
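// A minimal host-side sketch of driving this entry point. It assumes the graph is
// already flattened into CSR-style arrays (vertexlist holding per-vertex edge
// offsets, edgelist holding destination vertex ids), which matches how cudaGraph
// indexes edges above; the container names here are illustrative only.
//
//   std::vector<uint64_t> vertexlist = /* CSR edge offsets, one per vertex */;
//   std::vector<uint64_t> edgelist = /* CSR destination vertex ids */;
//   std::vector<float> bc_scores(vertex_cnt, 0.0f);
//   cuda_betweenness_centr(vertexlist.data(), edgelist.data(), bc_scores.data(),
//                          vertex_cnt, edge_cnt);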
| 5efb6df829756f9e35b1f28a4151c09030675942.cu | //=================================================================//
// CUDA Betweenness Centr kernel
// Topological-Driven: one node per thread, thread_centric,
// no atomicAdd instructions
//
// Reference:
// A. E. Sariyuce, et al. Betweenness Centrality on GPUs and
// Heterogeneous Architectures
//=================================================================//
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
__global__ void kernel_init(unsigned * d_dist, unsigned * d_sigma,
float * d_delta, uint64_t num_vertex, uint64_t root)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_dist[tid] = MY_INFINITY;
d_sigma[tid] = 0;
d_delta[tid] = 0;
if (tid==root)
{
d_dist[tid]=0;
d_sigma[tid]=1;
}
}
}
__global__ void kernel_forward_phase(cudaGraph graph,
unsigned * d_dist, unsigned * d_sigma,
bool * d_over, unsigned curr)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (d_dist[tid] != curr) return;
uint64_t u = tid;
uint64_t start, end;
start = graph.get_firstedge_index(u);
end = graph.get_edge_index_end(u);
for (uint64_t i=start; i<end; i++)
{
uint64_t w = graph.get_edge_dest(i);
if (d_dist[w] == MY_INFINITY)
{
d_dist[w] = curr+1;
*d_over = false;
}
if (d_dist[w] == (curr+1))
{
atomicAdd(&(d_sigma[w]), d_sigma[u]);
}
}
}
__global__ void kernel_backward_phase(cudaGraph graph,
unsigned * d_dist, unsigned * d_sigma,
float * d_delta, unsigned curr)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (d_dist[tid] != (curr-1)) return;
uint64_t u = tid;
float sum = 0;
uint64_t start, end;
start = graph.get_firstedge_index(u);
end = graph.get_edge_index_end(u);
for (uint64_t i=start; i<end; i++)
{
uint64_t w = graph.get_edge_dest(i);
if (d_dist[w] == curr)
{
sum += 1.0*d_sigma[u]/d_sigma[w]*(1.0+d_delta[w]);
}
}
d_delta[u] += sum;
}
__global__ void kernel_backsum_phase(cudaGraph graph,
float * d_BC, float * d_delta,
unsigned * d_dist, uint64_t root)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (tid == root) return;
if (d_dist[tid] == MY_INFINITY) return;
d_BC[tid] += d_delta[tid];
}
void cuda_betweenness_centr(uint64_t * vertexlist,
uint64_t * edgelist, float * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt)
{
float * device_BC = 0, * device_delta = 0;
unsigned * device_dist = 0, * device_sigma = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_BC, vertex_cnt*sizeof(float)) );
cudaErrCheck( cudaMemset(device_BC, 0, vertex_cnt*sizeof(float)) );
cudaErrCheck( cudaMalloc((void**)&device_delta, vertex_cnt*sizeof(float)) );
cudaErrCheck( cudaMalloc((void**)&device_dist, vertex_cnt*sizeof(unsigned)) );
cudaErrCheck( cudaMalloc((void**)&device_sigma, vertex_cnt*sizeof(unsigned)) );
cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
for (unsigned root=0;root<vertex_cnt;root++)
{
kernel_init<<<num_block, num_thread_per_block>>>(device_dist, device_sigma,
device_delta, vertex_cnt, root);
bool stop;
unsigned curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = true;
cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) );
kernel_forward_phase<<<num_block, num_thread_per_block>>>(d_graph, device_dist, device_sigma,
device_over, curr);
cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) );
curr++;
}while(!stop);
while(curr>1)
{
curr--;
kernel_backward_phase<<<num_block, num_thread_per_block>>>(d_graph, device_dist, device_sigma,
device_delta, curr);
}
kernel_backsum_phase<<<num_block, num_thread_per_block>>>(d_graph, device_BC, device_delta,
device_dist, root);
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
    cudaErrCheck( cudaMemcpy(vproplist, device_BC, vertex_cnt*sizeof(float),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( cudaFree(device_BC) );
cudaErrCheck( cudaFree(device_delta) );
cudaErrCheck( cudaFree(device_sigma) );
cudaErrCheck( cudaFree(device_dist) );
cudaErrCheck( cudaFree(device_over) );
}
|
8b43169b387ac03abe5e9e7f215a3ff5a61c38c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/UnfoldBackward.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/HIPContext.h>
#include <vector>
// Note on naming: it is unconventional.
// grad_in does not mean that it is a gradient w.r.t. the input,
// grad_in/grad_out is just an input/output of unfold_backward kernel.
//
// unfold_backward, the algorithm is described in
// /native/cpu/UnfoldBackwardKernel.cpp
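//
// In short: when step >= size the folds cannot overlap, so the kernel scatters each
// grad_in element straight to its position in grad_out; otherwise it iterates over
// grad_out and, for each element, sums every overlapping fold of grad_in.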
namespace at { namespace native {
namespace {
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _unfold_backward_elementwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
static void _launch_unfold_backward_kernel(int total_n_elems, func_t f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( _unfold_backward_elementwise_kernel<n_threads, n_elems_per_thread, func_t>)
, dim3(grid), dim3(block), 0, stream, total_n_elems, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _unfold_backward_internal_kernel(
TensorIterator& iter,
int64_t size,
int64_t step,
int64_t grad_in_dim_stride,
int64_t grad_in_last_dim_stride,
int64_t grad_in_dim_size,
int64_t grad_out_dim_stride,
bool is_step_ge_size
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_unfold_backward_internal_kernel<scalar_t>(
sub_iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
return;
}
char* __restrict__ grad_out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ grad_in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ idx_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
if (is_step_ge_size) {
char* __restrict__ idx_last_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(3));
auto offset_calc = make_offset_calculator<4>(iter);
// this loop simply copies the data
    // from grad_in to the proper places in grad_out
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
auto idx_last_dim = *reinterpret_cast<int64_t*>(idx_last_dim_ptr + offsets[3]);
auto grad_out_idx_dim = idx_dim * step + idx_last_dim;
grad_out_data[grad_out_idx_dim * grad_out_dim_stride] = *grad_in_data;
};
_launch_unfold_backward_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
else {
auto offset_calc = make_offset_calculator<3>(iter);
// The algorithm is: for each index in grad_out find
// the elements contributing to it and sum them up.
// Note: the algorithm does not require any synchronization.
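    // Each thread owns exactly one grad_out element and only reads from grad_in,
    // so the overlapping folds can be summed without atomics.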
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
// left_fold potentially intersecting with idx_dim
// is either (idx_dim - size) / step or the next integer.
int64_t left_fold_idx = (idx_dim > size) ? (idx_dim - size) / step : 0;
if (!(left_fold_idx * step <= idx_dim && idx_dim < left_fold_idx * step + size)) {
++left_fold_idx;
}
auto right_fold_idx = idx_dim / step;
right_fold_idx = (right_fold_idx >= grad_in_dim_size) ?
(grad_in_dim_size - 1) : right_fold_idx;
for (auto fold_idx = left_fold_idx; fold_idx <= right_fold_idx; ++fold_idx) {
auto idx_last_dim = idx_dim - fold_idx * step;
*grad_out_data += grad_in_data[fold_idx * grad_in_dim_stride
+ idx_last_dim * grad_in_last_dim_stride];
}
};
_launch_unfold_backward_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
}
void unfold_backward_cuda_kernel(
Tensor& grad_out,
const Tensor& grad_in,
int64_t dim,
int64_t size,
int64_t step
) {
dim = maybe_wrap_dim(dim, grad_out.dim());
// last dim stores the folds
auto last_dim = maybe_wrap_dim(-1, grad_in.dim());
auto grad_in_dim_stride = ensure_nonempty_stride(grad_in, dim);
auto grad_in_last_dim_stride = ensure_nonempty_stride(grad_in, last_dim);
auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
auto grad_out_dim_stride = ensure_nonempty_stride(grad_out, dim);
auto is_step_ge_size = (step >= size);
TensorIterator iter =
is_step_ge_size ?
_make_unfold_backward_iter_over_grad_in(
grad_out, grad_in, dim, size, step
) :
_make_unfold_backward_iter_over_grad_out(
grad_out, grad_in, dim, size, step
);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"unfold_backward_cuda", [&] {
_unfold_backward_internal_kernel<scalar_t>(
iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
);
}
}
REGISTER_DISPATCH(unfold_backward_stub, &unfold_backward_cuda_kernel);
}} // namespace at::native
| 8b43169b387ac03abe5e9e7f215a3ff5a61c38c8.cu | #include <ATen/native/UnfoldBackward.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <vector>
// Note on naming: it is unconventional.
// grad_in does not mean that it is a gradient w.r.t. the input,
// grad_in/grad_out is just an input/output of unfold_backward kernel.
//
// unfold_backward, the algorithm is described in
// /native/cpu/UnfoldBackwardKernel.cpp
namespace at { namespace native {
namespace {
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _unfold_backward_elementwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
static void _launch_unfold_backward_kernel(int total_n_elems, func_t f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::cuda::getCurrentCUDAStream();
_unfold_backward_elementwise_kernel<n_threads, n_elems_per_thread, func_t>
<<<grid, block, 0, stream>>>(total_n_elems, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _unfold_backward_internal_kernel(
TensorIterator& iter,
int64_t size,
int64_t step,
int64_t grad_in_dim_stride,
int64_t grad_in_last_dim_stride,
int64_t grad_in_dim_size,
int64_t grad_out_dim_stride,
bool is_step_ge_size
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_unfold_backward_internal_kernel<scalar_t>(
sub_iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
return;
}
char* __restrict__ grad_out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ grad_in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ idx_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
if (is_step_ge_size) {
char* __restrict__ idx_last_dim_ptr = reinterpret_cast<char*>(iter.data_ptr(3));
auto offset_calc = make_offset_calculator<4>(iter);
// this loop simply copies the data
    // from grad_in to the proper places in grad_out
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
auto idx_last_dim = *reinterpret_cast<int64_t*>(idx_last_dim_ptr + offsets[3]);
auto grad_out_idx_dim = idx_dim * step + idx_last_dim;
grad_out_data[grad_out_idx_dim * grad_out_dim_stride] = *grad_in_data;
};
_launch_unfold_backward_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
else {
auto offset_calc = make_offset_calculator<3>(iter);
// The algorithm is: for each index in grad_out find
// the elements contributing to it and sum them up.
// Note: the algorithm does not require any synchronization.
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ grad_out_data = reinterpret_cast<scalar_t*>(grad_out_ptr + offsets[0]);
auto* __restrict__ grad_in_data = reinterpret_cast<scalar_t*>(grad_in_ptr + offsets[1]);
auto idx_dim = *reinterpret_cast<int64_t*>(idx_dim_ptr + offsets[2]);
// left_fold potentially intersecting with idx_dim
// is either (idx_dim - size) / step or the next integer.
int64_t left_fold_idx = (idx_dim > size) ? (idx_dim - size) / step : 0;
if (!(left_fold_idx * step <= idx_dim && idx_dim < left_fold_idx * step + size)) {
++left_fold_idx;
}
auto right_fold_idx = idx_dim / step;
right_fold_idx = (right_fold_idx >= grad_in_dim_size) ?
(grad_in_dim_size - 1) : right_fold_idx;
for (auto fold_idx = left_fold_idx; fold_idx <= right_fold_idx; ++fold_idx) {
auto idx_last_dim = idx_dim - fold_idx * step;
*grad_out_data += grad_in_data[fold_idx * grad_in_dim_stride
+ idx_last_dim * grad_in_last_dim_stride];
}
};
_launch_unfold_backward_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
}
void unfold_backward_cuda_kernel(
Tensor& grad_out,
const Tensor& grad_in,
int64_t dim,
int64_t size,
int64_t step
) {
dim = maybe_wrap_dim(dim, grad_out.dim());
// last dim stores the folds
auto last_dim = maybe_wrap_dim(-1, grad_in.dim());
auto grad_in_dim_stride = ensure_nonempty_stride(grad_in, dim);
auto grad_in_last_dim_stride = ensure_nonempty_stride(grad_in, last_dim);
auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
auto grad_out_dim_stride = ensure_nonempty_stride(grad_out, dim);
auto is_step_ge_size = (step >= size);
TensorIterator iter =
is_step_ge_size ?
_make_unfold_backward_iter_over_grad_in(
grad_out, grad_in, dim, size, step
) :
_make_unfold_backward_iter_over_grad_out(
grad_out, grad_in, dim, size, step
);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"unfold_backward_cuda", [&] {
_unfold_backward_internal_kernel<scalar_t>(
iter,
size,
step,
grad_in_dim_stride,
grad_in_last_dim_stride,
grad_in_dim_size,
grad_out_dim_stride,
is_step_ge_size
);
}
);
}
}
REGISTER_DISPATCH(unfold_backward_stub, &unfold_backward_cuda_kernel);
}} // namespace at::native
|
3fd597f59b7acd6646da9e997b630fb8f60c96fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_dnn_interface.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void ReluForwardGpu(const int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] > 0 ? x[i] : 0; }
}
template<>
__global__ void ReluForwardGpu<half>(const int n, const half* x, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) {
if (__hgt(x[i], hzero())) {
y[i] = x[i];
} else {
y[i] = hzero();
}
}
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
__global__ void InplaceReluForwardGpu(const int n, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// There is a subtle cuda bug in (y[i] <= 0)
if (!(y[i] > 0)) { y[i] = 0; }
}
}
template<>
__global__ void InplaceReluForwardGpu<half>(const int n, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) {
if (!__hgt(y[i], hzero())) { y[i] = hzero(); }
}
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
__global__ void ReluBackwardGpu(const int n, const T* y, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > 0 ? dy[i] : 0; }
}
template<>
__global__ void ReluBackwardGpu<half>(const int n, const half* y, const half* dy, half* dx) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
half zero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, n) {
if (__hgt(y[i], zero)) {
dx[i] = dy[i];
} else {
dx[i] = zero;
}
}
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
template<typename T>
__global__ void InplaceReluBackwardGpu(const int n, const T* y, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) {
if (!(y[i] > 0)) { dx[i] = 0; }
}
}
template<>
__global__ void InplaceReluBackwardGpu<half>(const int n, const half* y, half* dx) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
half zero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, n) {
if (!__hgt(y[i], zero)) { dx[i] = zero; }
}
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
template<typename T>
__global__ void SigmoidForwardGpu(const int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = 1.0 / (1.0 + ::exp(-x[i])); }
}
template<>
__global__ void SigmoidForwardGpu<half>(const int n, const half* x, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = __hdiv(hone(), __hadd(hone(), hexp(__hneg(x[i])))); }
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
__global__ void SigmoidBackwardGpu(const int n, const T* y, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = dy[i] * y[i] * (1.0 - y[i]); }
}
template<>
__global__ void SigmoidBackwardGpu<half>(const int n, const half* y, const half* dy, half* dx) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = __hmul(dy[i], __hmul(y[i], __hsub(hone(), y[i]))); }
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
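// Backward uses the identity sigmoid'(x) = y * (1 - y) with y = sigmoid(x), so only the
// stored forward output y (not x) is needed: dx = dy * y * (1 - y), as implemented above.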
template<typename T>
struct ReluHelper final {
static void ReluForward(ep::Stream* stream, const int64_t n, const T* x, T* y) {
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
if (x == y) {
hipLaunchKernelGGL(( InplaceReluForwardGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, y);
} else {
hipLaunchKernelGGL(( ReluForwardGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, x, y);
}
}
static void ReluBackward(ep::Stream* stream, const int64_t n, const T* y, const T* dy, T* dx) {
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (dy == dx) {
hipLaunchKernelGGL(( InplaceReluBackwardGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, y, dx);
} else {
hipLaunchKernelGGL(( ReluBackwardGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, y, dy, dx);
}
}
};
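// Note on the launch configuration used above: the element count is passed to the kernels
// as a 32-bit int, so CHECK_LE(n, GetMaxVal<int32_t>() / 2) keeps index arithmetic safely
// inside int32 range. BlocksNum4ThreadsNum(n) is OneFlow's helper that (roughly) computes
// ceil(n / kCudaThreadsNumPerBlock) capped at the maximum grid size.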
} // namespace
void DnnIf<DeviceType::kGPU>::Relu(ep::Stream* stream, const int64_t n, const float* x, float* y) {
ReluHelper<float>::ReluForward(stream, n, x, y);
}
void DnnIf<DeviceType::kGPU>::Relu(ep::Stream* stream, const int64_t n, const double* x,
double* y) {
ReluHelper<double>::ReluForward(stream, n, x, y);
}
void DnnIf<DeviceType::kGPU>::Relu(ep::Stream* stream, const int64_t n, const float16* x,
float16* y) {
ReluHelper<half>::ReluForward(stream, n, reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
void DnnIf<DeviceType::kGPU>::ReluBackward(ep::Stream* stream, const int64_t n, const float* x,
const float* y, const float* dy, float* dx) {
ReluHelper<float>::ReluBackward(stream, n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::ReluBackward(ep::Stream* stream, const int64_t n, const double* x,
const double* y, const double* dy, double* dx) {
ReluHelper<double>::ReluBackward(stream, n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::ReluBackward(ep::Stream* stream, const int64_t n, const float16* x,
const float16* y, const float16* dy, float16* dx) {
ReluHelper<half>::ReluBackward(stream, n, reinterpret_cast<const half*>(y),
reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx));
}
void DnnIf<DeviceType::kGPU>::Sigmoid(ep::Stream* stream, int64_t n, const float* x, float* y) {
CHECK(IsKernelSafeInt32(n));
hipLaunchKernelGGL(( SigmoidForwardGpu<float>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, x, y);
}
void DnnIf<DeviceType::kGPU>::Sigmoid(ep::Stream* stream, int64_t n, const double* x, double* y) {
CHECK(IsKernelSafeInt32(n));
hipLaunchKernelGGL(( SigmoidForwardGpu<double>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, x, y);
}
void DnnIf<DeviceType::kGPU>::Sigmoid(ep::Stream* stream, int64_t n, const float16* x, float16* y) {
CHECK(IsKernelSafeInt32(n));
hipLaunchKernelGGL(( SigmoidForwardGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y));
}
void DnnIf<DeviceType::kGPU>::SigmoidBackward(ep::Stream* stream, const int64_t n, const float* x,
const float* y, const float* dy, float* dx) {
CHECK(IsKernelSafeInt32(n));
hipLaunchKernelGGL(( SigmoidBackwardGpu<float>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::SigmoidBackward(ep::Stream* stream, const int64_t n, const double* x,
const double* y, const double* dy, double* dx) {
CHECK(IsKernelSafeInt32(n));
hipLaunchKernelGGL(( SigmoidBackwardGpu<double>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::SigmoidBackward(ep::Stream* stream, const int64_t n, const float16* x,
const float16* y, const float16* dy, float16* dx) {
CHECK(IsKernelSafeInt32(n));
hipLaunchKernelGGL(( SigmoidBackwardGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, reinterpret_cast<const half*>(y), reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
} // namespace oneflow
| 3fd597f59b7acd6646da9e997b630fb8f60c96fd.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_dnn_interface.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void ReluForwardGpu(const int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] > 0 ? x[i] : 0; }
}
template<>
__global__ void ReluForwardGpu<half>(const int n, const half* x, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) {
if (__hgt(x[i], hzero())) {
y[i] = x[i];
} else {
y[i] = hzero();
}
}
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
__global__ void InplaceReluForwardGpu(const int n, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// There is a subtle cuda bug in (y[i] <= 0)
if (!(y[i] > 0)) { y[i] = 0; }
}
}
template<>
__global__ void InplaceReluForwardGpu<half>(const int n, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) {
if (!__hgt(y[i], hzero())) { y[i] = hzero(); }
}
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
__global__ void ReluBackwardGpu(const int n, const T* y, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > 0 ? dy[i] : 0; }
}
template<>
__global__ void ReluBackwardGpu<half>(const int n, const half* y, const half* dy, half* dx) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
half zero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, n) {
if (__hgt(y[i], zero)) {
dx[i] = dy[i];
} else {
dx[i] = zero;
}
}
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
template<typename T>
__global__ void InplaceReluBackwardGpu(const int n, const T* y, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) {
if (!(y[i] > 0)) { dx[i] = 0; }
}
}
template<>
__global__ void InplaceReluBackwardGpu<half>(const int n, const half* y, half* dx) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
half zero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, n) {
if (!__hgt(y[i], zero)) { dx[i] = zero; }
}
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
template<typename T>
__global__ void SigmoidForwardGpu(const int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = 1.0 / (1.0 + std::exp(-x[i])); }
}
template<>
__global__ void SigmoidForwardGpu<half>(const int n, const half* x, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = __hdiv(hone(), __hadd(hone(), hexp(__hneg(x[i])))); }
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
__global__ void SigmoidBackwardGpu(const int n, const T* y, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = dy[i] * y[i] * (1.0 - y[i]); }
}
template<>
__global__ void SigmoidBackwardGpu<half>(const int n, const half* y, const half* dy, half* dx) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = __hmul(dy[i], __hmul(y[i], __hsub(hone(), y[i]))); }
#else
HALF_CHECK_FAILED;
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
}
template<typename T>
struct ReluHelper final {
static void ReluForward(ep::Stream* stream, const int64_t n, const T* x, T* y) {
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
if (x == y) {
InplaceReluForwardGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, y);
} else {
ReluForwardGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, x, y);
}
}
static void ReluBackward(ep::Stream* stream, const int64_t n, const T* y, const T* dy, T* dx) {
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (dy == dx) {
InplaceReluBackwardGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, y, dx);
} else {
ReluBackwardGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, y, dy, dx);
}
}
};
} // namespace
void DnnIf<DeviceType::kGPU>::Relu(ep::Stream* stream, const int64_t n, const float* x, float* y) {
ReluHelper<float>::ReluForward(stream, n, x, y);
}
void DnnIf<DeviceType::kGPU>::Relu(ep::Stream* stream, const int64_t n, const double* x,
double* y) {
ReluHelper<double>::ReluForward(stream, n, x, y);
}
void DnnIf<DeviceType::kGPU>::Relu(ep::Stream* stream, const int64_t n, const float16* x,
float16* y) {
ReluHelper<half>::ReluForward(stream, n, reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
void DnnIf<DeviceType::kGPU>::ReluBackward(ep::Stream* stream, const int64_t n, const float* x,
const float* y, const float* dy, float* dx) {
ReluHelper<float>::ReluBackward(stream, n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::ReluBackward(ep::Stream* stream, const int64_t n, const double* x,
const double* y, const double* dy, double* dx) {
ReluHelper<double>::ReluBackward(stream, n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::ReluBackward(ep::Stream* stream, const int64_t n, const float16* x,
const float16* y, const float16* dy, float16* dx) {
ReluHelper<half>::ReluBackward(stream, n, reinterpret_cast<const half*>(y),
reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx));
}
void DnnIf<DeviceType::kGPU>::Sigmoid(ep::Stream* stream, int64_t n, const float* x, float* y) {
CHECK(IsKernelSafeInt32(n));
SigmoidForwardGpu<float><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, x, y);
}
void DnnIf<DeviceType::kGPU>::Sigmoid(ep::Stream* stream, int64_t n, const double* x, double* y) {
CHECK(IsKernelSafeInt32(n));
SigmoidForwardGpu<double><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, x, y);
}
void DnnIf<DeviceType::kGPU>::Sigmoid(ep::Stream* stream, int64_t n, const float16* x, float16* y) {
CHECK(IsKernelSafeInt32(n));
SigmoidForwardGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y));
}
void DnnIf<DeviceType::kGPU>::SigmoidBackward(ep::Stream* stream, const int64_t n, const float* x,
const float* y, const float* dy, float* dx) {
CHECK(IsKernelSafeInt32(n));
SigmoidBackwardGpu<float><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::SigmoidBackward(ep::Stream* stream, const int64_t n, const double* x,
const double* y, const double* dy, double* dx) {
CHECK(IsKernelSafeInt32(n));
SigmoidBackwardGpu<double><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, y, dy, dx);
}
void DnnIf<DeviceType::kGPU>::SigmoidBackward(ep::Stream* stream, const int64_t n, const float16* x,
const float16* y, const float16* dy, float16* dx) {
CHECK(IsKernelSafeInt32(n));
SigmoidBackwardGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, reinterpret_cast<const half*>(y), reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
} // namespace oneflow
|
11ba359b82080913176efd019db293e5991a98f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* 1k_jacobi5.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2, int sz )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
newa[j*m+i] = w0*a[j*m+i] +
w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
a[j*m+i+1] + a[(j+1)*m+i]) +
w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
__shared__ float mychange[256];
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
//***************** 1k optimization changes start here ******************//
__syncthreads();
int xi = blockIdx.x + gridDim.x*blockIdx.y;
if(xi == 0) {
float mych = 0.0f;
int ni = ti+blockDim.x*tj;
if( ni < sz ) mych = lchange[ni];
int mm = 256;
while( mm <= sz ){
if(ni+mm < sz)
mych = fmaxf( mych, lchange[ni+mm] );
mm += 256;
}
mychange[ni] = mych;
__syncthreads();
nn = blockDim.x*blockDim.x;
while( (nn>>=1) > 0 ){
if( ni < nn )
mychange[ni] = fmaxf(mychange[ni], mychange[ni+nn]);
__syncthreads();
}
if( ni == 0 )
lchange[0] = mychange[0];
}
//***************** 1k optimization changes end here ******************//
}
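// Reduction structure of the kernel above: each 16x16 block tree-reduces |newa - a| in
// shared memory and writes its maximum to lchange[blockId]; the block with linear id 0
// then folds all gx*gy partial maxima into 256 slots (the mm-strided loop) and
// tree-reduces them, leaving the global maximum change in lchange[0] for the host.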
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
hipEvent_t e1, e2;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
hipMalloc( &da, memsize );
hipMalloc( &dnewa, memsize );
hipMalloc( &lchange, gx * gy * sizeof(float) );
hipEventCreate( &e1 );
hipEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
hipMemcpy( da, a, memsize, hipMemcpyHostToDevice );
hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice );
do{
float msec;
++iters;
hipEventRecord( e1 );
hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, w0, w1, w2, gx*gy );
hipEventRecord( e2 );
hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost );
hipEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
double time = sumtime/1000.0f;
double dNumOps = 14.0 * iters * n *m;
double gflops = dNumOps/time/1e9;
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %.5f seconds total\n", sumtime/1000.0f );
printf( "Size(Number of Operations) = %.0f Ops/sec \n", dNumOps );
printf( "Throughtput = %.4f GFlops/sec \n",gflops );
hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost );
hipFree( da );
hipFree( dnewa );
hipFree( lchange );
hipEventDestroy( e1 );
hipEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
| 11ba359b82080913176efd019db293e5991a98f8.cu | /*
* 1k_jacobi5.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2, int sz )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
newa[j*m+i] = w0*a[j*m+i] +
w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
a[j*m+i+1] + a[(j+1)*m+i]) +
w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
__shared__ float mychange[256];
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
//***************** 1k optimization changes start here ******************//
__syncthreads();
int xi = blockIdx.x + gridDim.x*blockIdx.y;
if(xi == 0) {
float mych = 0.0f;
int ni = ti+blockDim.x*tj;
if( ni < sz ) mych = lchange[ni];
int mm = 256;
while( mm <= sz ){
if(ni+mm < sz)
mych = fmaxf( mych, lchange[ni+mm] );
mm += 256;
}
mychange[ni] = mych;
__syncthreads();
nn = blockDim.x*blockDim.x;
while( (nn>>=1) > 0 ){
if( ni < nn )
mychange[ni] = fmaxf(mychange[ni], mychange[ni+nn]);
__syncthreads();
}
if( ni == 0 )
lchange[0] = mychange[0];
}
//***************** 1k optimization changes end here ******************//
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
cudaEvent_t e1, e2;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
cudaMalloc( &da, memsize );
cudaMalloc( &dnewa, memsize );
cudaMalloc( &lchange, gx * gy * sizeof(float) );
cudaEventCreate( &e1 );
cudaEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
do{
float msec;
++iters;
cudaEventRecord( e1 );
jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2, gx*gy );
cudaEventRecord( e2 );
cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
cudaEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
double time = sumtime/1000.0f;
double dNumOps = 14.0 * iters * n *m;
double gflops = dNumOps/time/1e9;
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %.5f seconds total\n", sumtime/1000.0f );
printf( "Size(Number of Operations) = %.0f Ops/sec \n", dNumOps );
printf( "Throughtput = %.4f GFlops/sec \n",gflops );
cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
cudaFree( da );
cudaFree( dnewa );
cudaFree( lchange );
cudaEventDestroy( e1 );
cudaEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
2cd047facc8650440e01e74f80b243785817665c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
#include <omp.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <hetero_cmdparser.hpp>
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define cudaCheckLastError() { \
hipError_t error = hipGetLastError(); \
int id; hipGetDevice(&id); \
if(error != hipSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, hipGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkReadFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename.c_str(), ios::in|ios::binary); \
if (!fs->is_open()) \
{ \
printf("Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
goto cleanup; \
} \
fs->read(reinterpret_cast<char*>(pData), size); \
cleanup : \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkWriteFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::out|ios::binary); \
if (!fs->is_open()) \
{ \
fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->write(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
const char* key =
"{ h |help | | print help message }"
"{ i |srcFile | | source of the file }"
"{ dimx|dimx | | dimensionx }"
"{ dimy|dimy | | dimensiony }"
;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define grid_side 3
int main(int argc, char *argv[])
{
//================================================================================
// To set the GPU using hipSetDevice, we must set before launching MPI_Init
// Determine the MPI local rank per node is doable either in OpenMPI or MVAPICH2
int localRank;
char *localRankStr = NULL;
//================================================================================
// Investigate the number of GPUs per node.
int deviceCount = 0;
localRankStr = getenv("OMPI_COMM_WORLD_LOCAL_RANK");
if (localRankStr != NULL)
{
localRank = atoi(localRankStr);
hipGetDeviceCount(&deviceCount);
// cudaCheckLastError(); //Don't put this line
// printf("There are %02d device(s) at local process %02d\n",
// deviceCount, localRank);
cout << "There are " << deviceCount
<< " device(s) at local process "
<< localRank << endl;
if(deviceCount>0)
{
hipSetDevice(localRank % deviceCount); cudaCheckLastError();
hipDeviceReset(); cudaCheckLastError();
// hipDeviceEnablePeerAccess (localRank % deviceCount, 0); cudaCheckLastError();
for(int d=0; d<deviceCount; d++)
{
if(d!=(localRank % deviceCount))
{
hipDeviceEnablePeerAccess (d, 0); cudaCheckLastError();
}
}
}
}
//================================================================================
// Initialize MPI
int rank, size;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
printf("This is rank %02d, size %02d, of %s\n", rank, size, name);
MPI_Barrier(MPI_COMM_WORLD);
//
MPI_Comm comm2d; /* Cartesian communicator */
int dims[2];// = {0, 0}; /* allow MPI to choose grid block dimensions */
int periodic[2];// = {0, 0}; /* domain is non-periodic */
int reorder;// = 1; /* allow processes to be re-ranked */
int coords[2]; /* coordinates of our block in grid */
int up, down; /* ranks of processes above and below ours */
int left, right; /* ranks of processes to each side of ours */
//
int master = 0;
int worker;
int numMasters = 1;
int numWorkers = size;
// Parsing the arguments
CommandLineParser cmd(argc, argv, key);
if(rank==master) cmd.printParams();
MPI_Barrier(MPI_COMM_WORLD);
dims[0] = 2;
dims[1] = 2;
periodic[0] = 0;
periodic[1] = 0;
reorder = 1;
// // Set up Cartesian grid of processors. A new communicator is
// // created we get our rank within it.
// MPI_Dims_create(rank, 2, dims); ///This line will not work
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periodic, reorder, &comm2d );
MPI_Cart_get(comm2d, 2, dims, periodic, coords );
MPI_Comm_rank(comm2d, &rank );
printf("%d, %d, %d\n",coords[0],coords[1],rank);
MPI_Barrier(MPI_COMM_WORLD);
// Retrieve the information from cmd
const string srcFile = cmd.get<string>("srcFile", false);
const int dimx = cmd.get<int>("dimx", false);
const int dimy = cmd.get<int>("dimy", false);
const int total = dimx*dimy;
float *h_src = new float[total];
// Read to pointer in master process
if(rank==master)
{
cout << "Here" << endl;
checkReadFile(srcFile, h_src, total*sizeof(float));
cout << "Here" << endl;
}
// int3 clusterDim = make_int3(dims[0], dims[1], 1);
int processIdx_1d = rank;
int3 processIdx_2d = make_int3(coords[0], coords[1], 1);
/// Mimic Pack and Unpack MPI
int dimz = 1;
int3 featureIdx { 0, 0, 0};
int3 processIdx { 0, 0, 0};
int3 processDim {256, 256, 1};
int3 subDataDim {0, 0, 0};
int3 clusterDim {(dimx/processDim.x + ((dimx%processDim.x)?1:0)),
(dimy/processDim.y + ((dimy%processDim.y)?1:0)),
(dimz/processDim.z + ((dimz%processDim.z)?1:0))};
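// clusterDim uses the ceil-division idiom: dim/block + ((dim%block)?1:0) == ceil(dim/block),
// i.e. the number of processDim-sized tiles needed to cover the lattice in each direction.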
float *tmp = new float[processDim.x * processDim.y]; // Create process beyond the sub problem size
hipHostRegister(h_src, processDim.x * processDim.y *sizeof(float), hipHostRegisterPortable);
MPI_Request request;
float *p_src;
// p_src = (float*)malloc(processDim.x*processDim.y*sizeof(float));
hipMalloc((void**)&p_src, (processDim.x*processDim.y)*sizeof(float));
//Start packing
/// Naive approach, copy to another buffer, then send
int2 index_2d;
double start = MPI_Wtime();
int caught = 0;
if(rank==master)
{
for(processIdx.y=0; processIdx.y<clusterDim.y; processIdx.y++)
{
for(processIdx.x=0; processIdx.x<clusterDim.x; processIdx.x++)
{
/// !!! First step: Determine size of buffer
for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
{
for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
{
//2D global index
index_2d = make_int2(
processIdx.x*processDim.x+featureIdx.x,
processIdx.y*processDim.y+featureIdx.y);
if(index_2d.x==dimx) break;
}
if(index_2d.y==dimy) break;
}
subDataDim = make_int3(featureIdx.x, featureIdx.y, 1);
cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << endl;
//Second step: copy subdataSize
index_2d = make_int2(
processIdx.x*processDim.x+0,
processIdx.y*processDim.y+0);
MPI_Datatype mysubarray;
int starts[2] = {index_2d.y, index_2d.x}; ///!Order is very important
int subsizes[2] = {subDataDim.y, subDataDim.x}; ///!Order is very important
int bigsizes[2] = {dimy, dimx}; ///!Order is very important
MPI_Type_create_subarray(2, bigsizes, subsizes, starts,
MPI_ORDER_C, MPI_FLOAT, &mysubarray);
MPI_Type_commit(&mysubarray);
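// MPI_ORDER_C means dimension 0 varies slowest (row-major), so starts/subsizes/bigsizes are
// given as {y, x}; the subarray type lets MPI_Isend transmit this non-contiguous 2-D block
// of h_src directly, replacing the manual packing into tmp that is commented out below.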
// for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
// {
// for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
// {
// if(featureIdx.x == 0) // First position of first block
// {
// //2D global index
// index_2d = make_int2(
// processIdx.x*processDim.x+featureIdx.x,
// processIdx.y*processDim.y+featureIdx.y);
// if(index_2d.y<dimy)
// {
// // cout << "Caught " << ++caught << endl;
// memcpy(
// // &tmp[featureIdx.y * processDim.x],
// &tmp[featureIdx.y * subDataDim.x],
// &h_src[index_2d.y*dimx + index_2d.x],
// // processDim.x*sizeof(float));
// subDataDim.x*sizeof(float));
// }
// }
// }
// }
processIdx_1d = processIdx.y * clusterDim.x + processIdx.x;
cout << processIdx_1d << endl;
/// !!! Send to worker process
// Send the size of message
MPI_Isend(&subDataDim, 1, MPI_DOUBLE, processIdx_1d, 0, MPI_COMM_WORLD, &request);
// Send the message
// MPI_Isend(tmp, subDataDim.x * subDataDim.y, MPI_FLOAT, processIdx_1d, 1, MPI_COMM_WORLD, &request);
MPI_Isend(h_src, 1, mysubarray, processIdx_1d, 1, MPI_COMM_WORLD, &request);
// MPI_Send(&(bigarray[0][0]), 1, mysubarray, receiver, ourtag, MPI_COMM_WORLD);
cout << "Sent" << endl;
// free(tmp);
MPI_Type_free(&mysubarray);
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
// MPI_Recv(p_src, processDim.x*processDim.y, MPI_FLOAT, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&subDataDim, 1, MPI_DOUBLE, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(p_src, subDataDim.x * subDataDim.y, MPI_FLOAT, master, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
double elapsed = MPI_Wtime() - start;
if(rank==master) cout << "Time : " << elapsed << " s " << endl;
/// Debug
MPI_Barrier(MPI_COMM_WORLD);
char *filename = new char[100];
sprintf(filename, "result_%02d_%02d.raw", processIdx_2d.x, processIdx_2d.y);
printf("%s\n", filename);
float *h_tmp;
h_tmp = (float*)malloc(processDim.x*processDim.y*sizeof(float));
hipHostRegister(h_tmp, processDim.x * processDim.y *sizeof(float), hipHostRegisterPortable);
hipMemcpy(h_tmp, p_src, processDim.x*processDim.y*sizeof(float), hipMemcpyDeviceToHost); cudaCheckLastError();
// p_src is device memory and cannot be written through fstream directly; write the host copy
checkWriteFile(filename, h_tmp, processDim.x*processDim.y*sizeof(float));
MPI_Finalize();
return 0;
}
| 2cd047facc8650440e01e74f80b243785817665c.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
#include <omp.h>
#include <mpi.h>
#include <cuda.h>
#include <assert.h>
#include <hetero_cmdparser.hpp>
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define cudaCheckLastError() { \
cudaError_t error = cudaGetLastError(); \
int id; cudaGetDevice(&id); \
if(error != cudaSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, cudaGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkReadFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename.c_str(), ios::in|ios::binary); \
if (!fs->is_open()) \
{ \
printf("Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
goto cleanup; \
} \
fs->read(reinterpret_cast<char*>(pData), size); \
cleanup : \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkWriteFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::out|ios::binary); \
if (!fs->is_open()) \
{ \
fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->write(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
const char* key =
"{ h |help | | print help message }"
"{ i |srcFile | | source of the file }"
"{ dimx|dimx | | dimensionx }"
"{ dimy|dimy | | dimensiony }"
;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define grid_side 3
int main(int argc, char *argv[])
{
//================================================================================
// To set the GPU using cudaSetDevice, we must set before launching MPI_Init
// Determine the MPI local rank per node is doable either in OpenMPI or MVAPICH2
int localRank;
char *localRankStr = NULL;
//================================================================================
// Investigate the number of GPUs per node.
int deviceCount = 0;
localRankStr = getenv("OMPI_COMM_WORLD_LOCAL_RANK");
if (localRankStr != NULL)
{
localRank = atoi(localRankStr);
cudaGetDeviceCount(&deviceCount);
// cudaCheckLastError(); //Don't put this line
// printf("There are %02d device(s) at local process %02d\n",
// deviceCount, localRank);
cout << "There are " << deviceCount
<< " device(s) at local process "
<< localRank << endl;
if(deviceCount>0)
{
cudaSetDevice(localRank % deviceCount); cudaCheckLastError();
cudaDeviceReset(); cudaCheckLastError();
// cudaDeviceEnablePeerAccess (localRank % deviceCount, 0); cudaCheckLastError();
for(int d=0; d<deviceCount; d++)
{
if(d!=(localRank % deviceCount))
{
cudaDeviceEnablePeerAccess (d, 0); cudaCheckLastError();
}
}
}
}
//================================================================================
// Initialize MPI
int rank, size;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
printf("This is rank %02d, size %02d, of %s\n", rank, size, name);
MPI_Barrier(MPI_COMM_WORLD);
//
MPI_Comm comm2d; /* Cartesian communicator */
int dims[2];// = {0, 0}; /* allow MPI to choose grid block dimensions */
int periodic[2];// = {0, 0}; /* domain is non-periodic */
int reorder;// = 1; /* allow processes to be re-ranked */
int coords[2]; /* coordinates of our block in grid */
int up, down; /* ranks of processes above and below ours */
int left, right; /* ranks of processes to each side of ours */
//
int master = 0;
int worker;
int numMasters = 1;
int numWorkers = size;
// Parsing the arguments
CommandLineParser cmd(argc, argv, key);
if(rank==master) cmd.printParams();
MPI_Barrier(MPI_COMM_WORLD);
dims[0] = 2;
dims[1] = 2;
periodic[0] = 0;
periodic[1] = 0;
reorder = 1;
// // Set up Cartesian grid of processors. A new communicator is
// // created we get our rank within it.
// MPI_Dims_create(rank, 2, dims); ///This line will not work
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periodic, reorder, &comm2d );
MPI_Cart_get(comm2d, 2, dims, periodic, coords );
MPI_Comm_rank(comm2d, &rank );
printf("%d, %d, %d\n",coords[0],coords[1],rank);
MPI_Barrier(MPI_COMM_WORLD);
// Retrieve the information from cmd
const string srcFile = cmd.get<string>("srcFile", false);
const int dimx = cmd.get<int>("dimx", false);
const int dimy = cmd.get<int>("dimy", false);
const int total = dimx*dimy;
float *h_src = new float[total];
// Read to pointer in master process
if(rank==master)
{
cout << "Here" << endl;
checkReadFile(srcFile, h_src, total*sizeof(float));
cout << "Here" << endl;
}
// int3 clusterDim = make_int3(dims[0], dims[1], 1);
int processIdx_1d = rank;
int3 processIdx_2d = make_int3(coords[0], coords[1], 1);
/// Mimic Pack and Unpack MPI
int dimz = 1;
int3 featureIdx { 0, 0, 0};
int3 processIdx { 0, 0, 0};
int3 processDim {256, 256, 1};
int3 subDataDim {0, 0, 0};
int3 clusterDim {(dimx/processDim.x + ((dimx%processDim.x)?1:0)),
(dimy/processDim.y + ((dimy%processDim.y)?1:0)),
(dimz/processDim.z + ((dimz%processDim.z)?1:0))};
float *tmp = new float[processDim.x * processDim.y]; // Create process beyond the sub problem size
cudaHostRegister(h_src, processDim.x * processDim.y *sizeof(float), cudaHostRegisterPortable);
MPI_Request request;
float *p_src;
// p_src = (float*)malloc(processDim.x*processDim.y*sizeof(float));
cudaMalloc((void**)&p_src, (processDim.x*processDim.y)*sizeof(float));
//Start packing
/// Naive approach, copy to another buffer, then send
int2 index_2d;
double start = MPI_Wtime();
int caught = 0;
if(rank==master)
{
for(processIdx.y=0; processIdx.y<clusterDim.y; processIdx.y++)
{
for(processIdx.x=0; processIdx.x<clusterDim.x; processIdx.x++)
{
/// !!! First step: Determine size of buffer
for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
{
for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
{
//2D global index
index_2d = make_int2(
processIdx.x*processDim.x+featureIdx.x,
processIdx.y*processDim.y+featureIdx.y);
if(index_2d.x==dimx) break;
}
if(index_2d.y==dimy) break;
}
subDataDim = make_int3(featureIdx.x, featureIdx.y, 1);
cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << endl;
//Second step: copy subdataSize
index_2d = make_int2(
processIdx.x*processDim.x+0,
processIdx.y*processDim.y+0);
MPI_Datatype mysubarray;
int starts[2] = {index_2d.y, index_2d.x}; ///!Order is very important
int subsizes[2] = {subDataDim.y, subDataDim.x}; ///!Order is very important
int bigsizes[2] = {dimy, dimx}; ///!Order is very important
MPI_Type_create_subarray(2, bigsizes, subsizes, starts,
MPI_ORDER_C, MPI_FLOAT, &mysubarray);
MPI_Type_commit(&mysubarray);
// for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
// {
// for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
// {
// if(featureIdx.x == 0) // First position of first block
// {
// //2D global index
// index_2d = make_int2(
// processIdx.x*processDim.x+featureIdx.x,
// processIdx.y*processDim.y+featureIdx.y);
// if(index_2d.y<dimy)
// {
// // cout << "Caught " << ++caught << endl;
// memcpy(
// // &tmp[featureIdx.y * processDim.x],
// &tmp[featureIdx.y * subDataDim.x],
// &h_src[index_2d.y*dimx + index_2d.x],
// // processDim.x*sizeof(float));
// subDataDim.x*sizeof(float));
// }
// }
// }
// }
processIdx_1d = processIdx.y * clusterDim.x + processIdx.x;
cout << processIdx_1d << endl;
/// !!! Send to worker process
// Send the size of message
MPI_Isend(&subDataDim, 1, MPI_DOUBLE, processIdx_1d, 0, MPI_COMM_WORLD, &request);
// Send the message
// MPI_Isend(tmp, subDataDim.x * subDataDim.y, MPI_FLOAT, processIdx_1d, 1, MPI_COMM_WORLD, &request);
MPI_Isend(h_src, 1, mysubarray, processIdx_1d, 1, MPI_COMM_WORLD, &request);
// MPI_Send(&(bigarray[0][0]), 1, mysubarray, receiver, ourtag, MPI_COMM_WORLD);
cout << "Sent" << endl;
// free(tmp);
MPI_Type_free(&mysubarray);
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
// MPI_Recv(p_src, processDim.x*processDim.y, MPI_FLOAT, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&subDataDim, 1, MPI_DOUBLE, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(p_src, subDataDim.x * subDataDim.y, MPI_FLOAT, master, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
double elapsed = MPI_Wtime() - start;
if(rank==master) cout << "Time : " << elapsed << " s " << endl;
/// Debug
MPI_Barrier(MPI_COMM_WORLD);
char *filename = new char[100];
sprintf(filename, "result_%02d_%02d.raw", processIdx_2d.x, processIdx_2d.y);
printf("%s\n", filename);
float *h_tmp;
h_tmp = (float*)malloc(processDim.x*processDim.y*sizeof(float));
cudaHostRegister(h_tmp, processDim.x * processDim.y *sizeof(float), cudaHostRegisterPortable);
cudaMemcpy(h_tmp, p_src, processDim.x*processDim.y*sizeof(float), cudaMemcpyDeviceToHost); cudaCheckLastError();
// p_src is device memory and cannot be written through fstream directly; write the host copy
checkWriteFile(filename, h_tmp, processDim.x*processDim.y*sizeof(float));
MPI_Finalize();
return 0;
}
|
ad5a3b81d3fb174df3205203d2b14be744f97181.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_axpy.h"
#include "hip/hip_fp16.h"
namespace anakin{
namespace saber{
template <typename DataDtype>
__global__ void ker_axpy_fwd(int n, int img_size,
const DataDtype* scale, const DataDtype* x, const DataDtype* y, DataDtype* dst) {
CUDA_KERNEL_LOOP(idx, n) {
int scale_id = idx / img_size;
dst[idx] = scale[scale_id] * x[idx] + y[idx];
}
}
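// Each output element computes dst = scale * x + y, where scale holds one value per
// (batch, channel) plane: img_size = H*W, so idx / img_size recovers the plane index
// used to look up the broadcast scale factor.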
template <DataType OpDtype ,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberAxpy<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(const std::vector<DataTensor_in *>& inputs,
std::vector<DataTensor_out *>& outputs,
AxpyParam<OpTensor>& param) {
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
if (!(inputs[1]->valid_shape() == outputs[0]->valid_shape())
|| !(inputs[2]->valid_shape() == outputs[0]->valid_shape())) {
return SaberUnKownError;
}
const InDataType* scale = inputs[0]->data();
const InDataType* x = inputs[1]->data();
const InDataType* y = inputs[2]->data();
OutDataType* dst = outputs[0]->mutable_data();
int img_size = outputs[0]->height() * outputs[0]->width();
int count = outputs[0]->valid_size();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()
&& inputs[1]->is_continue_mem() && inputs[2]->is_continue_mem()) {
hipLaunchKernelGGL(( ker_axpy_fwd<InDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
count, img_size, scale, x, y, dst);
}
return SaberSuccess;
}
} //namespace anakin
} //namespace anakin
| ad5a3b81d3fb174df3205203d2b14be744f97181.cu | #include "saber/funcs/impl/cuda/saber_axpy.h"
#include "cuda_fp16.h"
namespace anakin{
namespace saber{
template <typename DataDtype>
__global__ void ker_axpy_fwd(int n, int img_size,
const DataDtype* scale, const DataDtype* x, const DataDtype* y, DataDtype* dst) {
CUDA_KERNEL_LOOP(idx, n) {
int scale_id = idx / img_size;
dst[idx] = scale[scale_id] * x[idx] + y[idx];
}
}
template <DataType OpDtype ,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberAxpy<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(const std::vector<DataTensor_in *>& inputs,
std::vector<DataTensor_out *>& outputs,
AxpyParam<OpTensor>& param) {
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
if (!(inputs[1]->valid_shape() == outputs[0]->valid_shape())
|| !(inputs[2]->valid_shape() == outputs[0]->valid_shape())) {
return SaberUnKownError;
}
const InDataType* scale = inputs[0]->data();
const InDataType* x = inputs[1]->data();
const InDataType* y = inputs[2]->data();
OutDataType* dst = outputs[0]->mutable_data();
int img_size = outputs[0]->height() * outputs[0]->width();
int count = outputs[0]->valid_size();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()
&& inputs[1]->is_continue_mem() && inputs[2]->is_continue_mem()) {
ker_axpy_fwd<InDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>\
(count, img_size, scale, x, y, dst);
}
return SaberSuccess;
}
} //namespace anakin
} //namespace anakin
|
19c680889bab0d9d29ee3c631a72bf6de05c1de8.hip | // !!! This is a file automatically generated by hipify!!!
#include"1dIsing.h"
/* Benchmarks */
void benchH(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, hiprandState_t *devStates, arguments args){
int j = 0;
int steps = (int)(args.finalT/deltaT);
float meanCov;
float ssaCov;
int *ssaLat = (int*)malloc(N * sizeof(int));
if(ssaLat == NULL){
printf("ssaLat = NULL\n");
exit(1);
}
for(j = 0;j < N;j++)
ssaLat[j] = lat[j];
printf("\n h\t\t DevCov\t\t SSACov\t\tExact solution\tError\n");
j = 0;
for(j = 0; j < STEPS_IN_H; j++){
react.h = j*2.0/(STEPS_IN_H - 1);
//Burning time
fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov = fractionalStep(lat,devLat,steps,
react, devStates, deltaT, N, cov, devCov,args);
// ssaCov = ssa(ssaLat, N, 10000, react);
fprintf(results,"%f\t%f\n",react.h,meanCov);
printf("%f\t%f\t%f\t%f\t%1.2e\n",react.h,meanCov,ssaCov,
exactMagnetization(react),fabs(meanCov-exactMagnetization(react)));
}
free(ssaLat);
}
//============================================================
void benchError(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, hiprandState_t *devStates, arguments args){
int i, j;
int top = 10; // Change the top variable to take more points in the benchmark
float ssaCov;
int nReals = 20; // Number of realizations to take
int steps = (int)(args.finalT/deltaT);
float meanCov;
if(N!= (int)pow(2,top)){
printf("ERROR : benchError :\n\tDimension of lattice : %d\n\tDimension required for this test : %d\n\t",N, (int)pow(2,top));
printf("Either change the 'top' variable in 'benchError'\n\tor the size of the lattice\n\t");
printf("In the second case, split %d between threads (-t) and blocks (-b)\n",(int)pow(2,top)/8);
exit(1);
}
if(args.blocks != 2){
printf("ERROR : benchError :\tThe number of blocks must be two for this benchmark.\n");
exit(1);
}
ssaCov = ssa(lat, N, 10000, react);
printf("Offset\t <m> (fs)\t <m> (ssa)\t exact\t\t Error\n");
for(i = 2;i < (top - 1);i++){
args.offSet = (int)pow(2, i );
args.threads = (int)pow(2, top - i - 2);
args.blocks = 2;
meanCov = 0.0;
for(j = 0;j < nReals;j++){
//Burning time
//fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov += fractionalStep(lat,devLat,steps, react, devStates, deltaT, N, cov, devCov,args);
}
meanCov = meanCov/nReals;
fprintf(results,"%d\t\t%f\n",args.offSet, meanCov, ssaCov,fabs(meanCov - ssaCov));
printf("%d\t%f\t%f\t%f\t%1.2e\n",args.offSet,meanCov,ssaCov, exactMagnetization(react),fabs(meanCov-exactMagnetization(react)));
}
}
//===========================================================
/*
Benchmark for different DT and constant h
*/
void benchDt(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, hiprandState_t *devStates, arguments args){
int j,i;
float meanCov;
float ssaCov;
float exactSolution;
int *ssaLat = (int*)malloc(N * sizeof(int));
const int steps = (int)(args.finalT/deltaT);
const int DTsteps = 20; // Different Dt to try
const float largestDt = 3.0;
const float smallestDt = 1/2.0;
const int nReals = 10; // Number of realizations to take
if(ssaLat == NULL){
printf("ssaLat = NULL\n");
exit(1);
}
for(j = 0;j < N;j++)
ssaLat[j] = lat[j];
printf("\n Dt\t\t DevCov\t\t SSACov\t\tExact solution\tError\n");
react.h = 2*2.0/(STEPS_IN_H - 1);
ssaCov = ssa(ssaLat, N, 10000, react);
exactSolution = exactMagnetization(react);
for(j = DTsteps; j > 0; j--){
args.deltaT = j * (largestDt - smallestDt)/DTsteps + smallestDt;
meanCov = 0.0;
for(i = 0; i < nReals; i++){
//Burning time
fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov += fractionalStep(lat,devLat,steps,
react, devStates, deltaT, N, cov, devCov,args);
}
meanCov = meanCov/nReals;
fprintf(results,"%f\t%f\n",args.deltaT,meanCov,exactSolution,fabs(meanCov-exactSolution));
printf("%f\t%f\t%f\t%f\t%1.2e\n",args.deltaT, meanCov, ssaCov,
exactSolution,fabs(meanCov-exactSolution));
}
free(ssaLat);
}
//============================================================
/*
Comparison between exact solution and approximate solution while changing both
Dt and h (potential).
*/
void benchDt2(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, hiprandState_t *devStates, arguments args){
int j,i;
float meanCov;
float exactSolution;
const int steps = (int)(args.finalT/deltaT);
const int DTsteps = 3; // Different Dt to try
const float largestDt = 3.0;
const float smallestDt = 1/2.0;
printf("\n Dt\t\tDevCov\t\tExact solution\tError\n");
for(j = DTsteps; j > 0; j--){
args.deltaT = j * (largestDt - smallestDt)/DTsteps + smallestDt;
for(i = 0; i < STEPS_IN_H; i++){
react.h = i * 2.0/(STEPS_IN_H-1);
//Burning time
fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov = fractionalStep(lat,devLat,steps,
react, devStates, deltaT, N, cov, devCov,args);
exactSolution = exactMagnetization(react);
fprintf(results,"%f\t%f\t%f\t%f\t%f\n",args.deltaT,react.h,meanCov,exactSolution,fabs(meanCov-exactSolution));
printf("%f\t%f\t%f\t%1.2e\n",args.deltaT, meanCov,exactSolution,fabs(meanCov-exactSolution));
}
}
}
//============================================================
//% exact solution of the mean coverage of the Ising model
//% with spins in {0,1}
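//% Transfer-matrix result: for spins sigma in {-1,+1} with coupling K and field h_eff, the
//% magnetization is m = sinh(b*h_eff) / sqrt(sinh^2(b*h_eff) + exp(-4*b*K)). Mapping the
//% {0,1} spins via sigma = 2s-1 gives (with the sign conventions used here) h_eff = -h/2 + J/2
//% and K = J/4, and the returned coverage is (m+1)/2 -- exactly the expressions computed below.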
float exactMagnetization(reactData react){
float b = react.b;
float h = react.h;
float K01 = react.J;
h = -h/2+K01/2;
float K = K01/4;
float a = sinh(b*h);
b = a*a + exp(-b*4*K);
float y = a/sqrt(b);
y = (y+1)/2;
return y;
}
| 19c680889bab0d9d29ee3c631a72bf6de05c1de8.cu | #include"1dIsing.h"
/* Benchmarks */
void benchH(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, curandState *devStates, arguments args){
int j = 0;
int steps = (int)(args.finalT/deltaT);
float meanCov;
float ssaCov;
int *ssaLat = (int*)malloc(N * sizeof(int));
if(ssaLat == NULL){
printf("ssaLat = NULL\n");
exit(1);
}
for(j = 0;j < N;j++)
ssaLat[j] = lat[j];
printf("\n h\t\t DevCov\t\t SSACov\t\tExact solution\tError\n");
j = 0;
for(j = 0; j < STEPS_IN_H; j++){
react.h = j*2.0/(STEPS_IN_H - 1);
//Burning time
fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov = fractionalStep(lat,devLat,steps,
react, devStates, deltaT, N, cov, devCov,args);
// ssaCov = ssa(ssaLat, N, 10000, react);
fprintf(results,"%f\t%f\n",react.h,meanCov);
printf("%f\t%f\t%f\t%f\t%1.2e\n",react.h,meanCov,ssaCov,
exactMagnetization(react),fabs(meanCov-exactMagnetization(react)));
}
free(ssaLat);
}
//============================================================
void benchError(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, curandState *devStates, arguments args){
int i, j;
int top = 10; // Change the top variable to take more points in the benchmark
float ssaCov;
int nReals = 20; // Number of realizations to take
int steps = (int)(args.finalT/deltaT);
float meanCov;
if(N!= (int)pow(2,top)){
printf("ERROR : benchError :\n\tDimension of lattice : %d\n\tDimension required for this test : %d\n\t",N, (int)pow(2,top));
printf("Either change the 'top' variable in 'benchError'\n\tor the size of the lattice\n\t");
printf("In the second case, split %d between threads (-t) and blocks (-b)\n",(int)pow(2,top)/8);
exit(1);
}
if(args.blocks != 2){
printf("ERROR : benchError :\tThe number of blocks must be two for this benchmark.\n");
exit(1);
}
ssaCov = ssa(lat, N, 10000, react);
printf("Offset\t <m> (fs)\t <m> (ssa)\t exact\t\t Error\n");
for(i = 2;i < (top - 1);i++){
args.offSet = (int)pow(2, i );
args.threads = (int)pow(2, top - i - 2);
args.blocks = 2;
meanCov = 0.0;
for(j = 0;j < nReals;j++){
//Burning time
//fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov += fractionalStep(lat,devLat,steps, react, devStates, deltaT, N, cov, devCov,args);
}
meanCov = meanCov/nReals;
fprintf(results,"%d\t\t%f\n",args.offSet, meanCov, ssaCov,fabs(meanCov - ssaCov));
printf("%d\t%f\t%f\t%f\t%1.2e\n",args.offSet,meanCov,ssaCov, exactMagnetization(react),fabs(meanCov-exactMagnetization(react)));
}
}
//===========================================================
/*
Benchmark for different DT and constant h
*/
void benchDt(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, curandState *devStates, arguments args){
int j,i;
float meanCov;
float ssaCov;
float exactSolution;
int *ssaLat = (int*)malloc(N * sizeof(int));
const int steps = (int)(args.finalT/deltaT);
const int DTsteps = 20; // Different Dt to try
const float largestDt = 3.0;
const float smallestDt = 1/2.0;
const int nReals = 10; // Number of realizations to take
if(ssaLat == NULL){
printf("ssaLat = NULL\n");
exit(1);
}
for(j = 0;j < N;j++)
ssaLat[j] = lat[j];
printf("\n Dt\t\t DevCov\t\t SSACov\t\tExact solution\tError\n");
react.h = 2*2.0/(STEPS_IN_H - 1);
ssaCov = ssa(ssaLat, N, 10000, react);
exactSolution = exactMagnetization(react);
for(j = DTsteps; j > 0; j--){
args.deltaT = j * (largestDt - smallestDt)/DTsteps + smallestDt;
meanCov = 0.0;
for(i = 0; i < nReals; i++){
//Burning time
fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov += fractionalStep(lat,devLat,steps,
react, devStates, deltaT, N, cov, devCov,args);
}
meanCov = meanCov/nReals;
fprintf(results,"%f\t%f\n",args.deltaT,meanCov,exactSolution,fabs(meanCov-exactSolution));
printf("%f\t%f\t%f\t%f\t%1.2e\n",args.deltaT, meanCov, ssaCov,
exactSolution,fabs(meanCov-exactSolution));
}
free(ssaLat);
}
//============================================================
/*
Comparison between exact solution and approximate solution while changing both
Dt and h (potential).
*/
void benchDt2(int* lat, int* devLat, int N,float deltaT,
int *cov, int*devCov, reactData react, FILE* results, curandState *devStates, arguments args){
int j,i;
float meanCov;
float exactSolution;
const int steps = (int)(args.finalT/deltaT);
const int DTsteps = 3; // Different Dt to try
const float largestDt = 3.0;
const float smallestDt = 1/2.0;
printf("\n Dt\t\tDevCov\t\tExact solution\tError\n");
for(j = DTsteps; j > 0; j--){
args.deltaT = j * (largestDt - smallestDt)/DTsteps + smallestDt;
for(i = 0; i < STEPS_IN_H; i++){
react.h = i * 2.0/(STEPS_IN_H-1);
//Burning time
fractionalStep(lat,devLat,BURNING, react, devStates, deltaT, N, cov, devCov, args);
//Actual computation
meanCov = fractionalStep(lat,devLat,steps,
react, devStates, deltaT, N, cov, devCov,args);
exactSolution = exactMagnetization(react);
fprintf(results,"%f\t%f\t%f\t%f\t%f\n",args.deltaT,react.h,meanCov,exactSolution,fabs(meanCov-exactSolution));
printf("%f\t%f\t%f\t%1.2e\n",args.deltaT, meanCov,exactSolution,fabs(meanCov-exactSolution));
}
}
}
//============================================================
//% exact solution of the mean coverage of the Ising model
//% with spins in {0,1}
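//% In formula form (matching the code below, with beta = react.b and K01 = react.J):
//%   h_eff = -h/2 + K01/2,  K = K01/4,
//%   m = sinh(beta*h_eff) / sqrt( sinh(beta*h_eff)^2 + exp(-4*beta*K) ),
//%   coverage y = (m + 1)/2, which maps spin values {-1,1} onto {0,1}.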
float exactMagnetization(reactData react){
float b = react.b;
float h = react.h;
float K01 = react.J;
h = -h/2+K01/2;
float K = K01/4;
float a = sinh(b*h);
b = a*a + exp(-b*4*K);
float y = a/sqrt(b);
y = (y+1)/2;
return y;
}
|
686a0c8f1e6fbfc88cdcd395d1c363fb06dd0423.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <random>
#include <math.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
} \
}
void dot(float *a, float *b, float *y, int n){
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
double r = 0;
for(int k = 0; k < n; ++k)
{
r += a[i*n+k] * b[k*n+j];
}
y[i*n+j] = abs(r)<0.001?0: r;
}
}
}
void inputArray(float *a, int n){
for(int i=0;i<n*n;++i){
std::cin >> a[i];
}
}
void showArray(float *a, int n){
for(int i=0;i<n;++i){
for(int j=0;j<n;++j){
std::cout<<a[i*n+j]<<" ";
}
std::cout<<std::endl;
}
}
__global__ void mkide(float *a, int n){
int index = blockIdx.x * blockDim.x + threadIdx.x;
a[index*n+index] = 1;
}
__global__ void divRow(float *a, float *b, float *s, int n, int i){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index<n){
// printf("%d\n", index);
// printf("%f\n", b[i*n+index]);
float t = a[i*n+i];
a[i*n+index] /= t;
b[i*n+index] /= t;
s[index] = a[index * n + i];
// printf("%f %f\n", b[i*n+index], a[i*n+i]);
}
}
// __global__ void GaussElimination(float *a, float *b, int n, int i){
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if(index != i && index < n){
// float t = a[index*n+i];
// for(int k = 0; k < n; ++k){
// a[index*n+k] -= a[i*n+k]*t;
// b[index*n+k] -= b[i*n+k]*t;
// }
// }
// }
__global__ void GaussElimination(float *a, float *b, float *t, int n, int i){
//-- should a and b[i*n:i*n+k] go through cache or shared memory? -- t should probably use constant memory
// many threads here end up doing no work; needs rework
int col = blockIdx.x;
int row = blockIdx.y*blockDim.x + threadIdx.x;
// if(col >= n || row >= n || i == col || i == row) return;
if(row >= n || i == col) return;
// printf("%d %d %d\n", col, row, blockDim.x);
int index = col * n + row;
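// note: "col" (blockIdx.x) picks the matrix row being updated and "row" picks the element inside it,
// so index = col*n + row addresses entry (col, row) in row-major storage; t[col] is the pivot-column
// value a[col*n + i] that divRow stashed before this elimination step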
// printf("%d %f %f\n", index, a[index], t[col]);
// a[index*n+k] -= __ldg(a[i*n+k])*t[col];
// b[index*n+k] -= __ldg(b[i*n+k])*t[col];
a[index] -= a[i*n+row]*t[col];
b[index] -= b[i*n+row]*t[col];
}
__constant__ float t[10000];
__host__ void GaussJordanGpuOptimize(float *a, float *b, int n){
int blockSize = n/32 + (n%32 ? 1 : 0);
printf("blockSize %d\n",blockSize);
hipLaunchKernelGGL(( mkide), dim3(blockSize),dim3(32), 0, 0, b, n);
hipDeviceSynchronize();
// for(int i = 0; i < n; ++i){
// b[i*n+i] = 1;
// }
dim3 thread(32);
dim3 block(n, n/32 + (n%32 ? 1 : 0));
printf("%d, %d, %d\n", n, n/32 + (n%32 ? 1 : 0), 32);
float *s;
hipMalloc(&s, sizeof(float)*n);
for(int i = 0;i<n; ++i){
int in = i*n;
// std::cout<<i<<" "<<in<<" "<<&a[in]<<" "<<&b[in]<<std::endl;
// std::cout<<thread.x<<" "<<thread.y<<" "<<thread.z<<std::endl;
// std::cout<<block.x<<" "<<block.y<<" "<<block.z<<std::endl;
hipLaunchKernelGGL(( divRow), dim3(blockSize), dim3(thread), 0, 0, a, b, s, n, i);
hipDeviceSynchronize();
// hipMemcpyToSymbol(t, &a[in], n, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( GaussElimination), dim3(block), dim3(thread), 0, 0, a, b, s, n, i);
CHECK(hipDeviceSynchronize());
}
}
// __global__ void GaussJordanGpuOptimize(float *a, float *b, int n){
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if(index>=n)return;
// a[index*n+index] = 1;
// __syncthreads();
// for(int i = 0;i<n; ++i){
// int in = i*n;
// float t = a[in+i];
// a[in+index] /= t;
// b[in+index] /= t;
// __syncthreads();
// for(int j=0;j<n;++j){
// if(j != i){
// float t = a[j*n+i];
// a[j*n+index] -= a[in+index]*t;
// b[j*n+index] -= b[in+index]*t;
// }
// }
// __syncthreads();
// }
// }
__global__ void GaussJordanGpu(float *a, float *b, int n){
for(int i = 0; i < n; ++i){
b[i*n+i] = 1;
}
for(int i = 0; i < n; ++i){
float t = a[i*n+i];
for(int j = 0; j < n; ++j){
a[i*n+j] /= t;
b[i*n+j] /= t;
}
for(int j = 0; j < n; ++j){
if(i != j){
float t = a[j*n+i];
for(int k = 0; k < n; ++k){
a[j*n+k] -= a[i*n+k]*t;
b[j*n+k] -= b[i*n+k]*t;
}
}
}
}
}
void GaussJordan(float *a, float *b, int n){
for(int i = 0; i < n; ++i){
b[i*n+i] = 1;
}
for(int i = 0; i < n; ++i){
float t = a[i*n+i];
for(int j = 0; j < n; ++j){
a[i*n+j] /= t;
b[i*n+j] /= t;
}
for(int j = 0; j < n; ++j){
if(i != j){
float t = a[j*n+i];
for(int k = 0; k < n; ++k){
a[j*n+k] -= a[i*n+k]*t;
b[j*n+k] -= b[i*n+k]*t;
}
}
}
}
}
int main(){
int n = 1000;
// std::cin>>n;
std::chrono::system_clock::time_point start, end;
std::mt19937 mt(982359349);
std::uniform_real_distribution<> MyRand(-1.0, 1.0);
float *ref = (float*)calloc(n*n, sizeof(float));
for(int i = 0;i<n*n;i++){
ref[i] = MyRand(mt);
// std::cout<<ref[i]<<" ";
}
std::cout<<std::endl;
float *a, *b;
// *a = (float*)calloc(n*n, sizeof(float));
// *b = (float*)calloc(n*n, sizeof(float));
hipMalloc(&a, n*n*sizeof(float));
hipMalloc(&b, n*n*sizeof(float));
float *buf = (float*)calloc(n*n, sizeof(float));
float *a_ = (float*)calloc(n*n, sizeof(float));
float *b_ = (float*)calloc(n*n, sizeof(float));
float *y_ = (float*)calloc(n*n, sizeof(float));
// hipMallocManaged(&a, n*n*sizeof(float));
// hipMallocManaged(&b, n*n*sizeof(float));
for(int i = 0;i<n*n;i++){
a_[i] = MyRand(mt);
// std::cout<<ref[i]<<" ";
}
// float *a;
// CHECK(hipMallocManaged(&a, n*n*sizeof(float)));
// float *b;
// CHECK(hipMallocManaged(&b, n*n*sizeof(float)));
// for(int j = 10;j<=10000;j*=10){
// for(int i=1;i<10;++i){
// int N = j*i;
// int blockSize = N/32 + (N%32 ? 1 : 0);
// hipMemcpy(ref, a, n*n*sizeof(float), hipMemcpyHostToDevice);
// start = std::chrono::system_clock::now();
// // GaussJordan(a, b, N);
// GaussJordanGpuOptimize(a, b, N);
// // GaussJordanGpuOptimize<<<blockSize, 32>>>(a, b, N);
// hipDeviceSynchronize();
// end = std::chrono::system_clock::now();
// double elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
// std::cout<<N<<"\t"<<elapsed<<std::endl;
// }
// }
// float *s;
// hipMalloc(&s, sizeof(float)*n);
// dim3 thread(32);
// dim3 block(n, n/32 + (n%32 ? 1 : 0));
// hipMemcpy(a, a_, n*n*sizeof(float), hipMemcpyHostToDevice);
//hipLaunchKernelGGL(( GaussElimination), dim3(block), dim3(thread), 0, 0, a, b, s, n, i);
// // std::cout<<"hoge"<<std::endl;
// // inputArray(a, n);
// // std::cout<<"hoge"<<std::endl;
hipMemcpy(a, a_, n*n*sizeof(float), hipMemcpyHostToDevice);
// showArray(a_, n);
GaussJordanGpuOptimize(a, b, n);
hipDeviceSynchronize();
hipMemcpy(buf, a, n*n*sizeof(float), hipMemcpyDeviceToHost);
// showArray(buf, n);
hipMemcpy(b_, b, n*n*sizeof(float), hipMemcpyDeviceToHost);
// showArray(b_, n);
std::cout<<"meow"<<std::endl;
dot(a_, b_, y_, n);
double sum=0;
for(int i = 0;i<n*n;++i)sum+=buf[i];
std::cout<<sum<<std::endl;
sum = 0;
for(int i = 0;i<n*n;++i)sum+=y_[i];
std::cout<<sum<<std::endl;
// showArray(y_, n);
return 0;
} | 686a0c8f1e6fbfc88cdcd395d1c363fb06dd0423.cu | #include <iostream>
#include <chrono>
#include <random>
#include <math.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
} \
}
void dot(float *a, float *b, float *y, int n){
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
double r = 0;
for(int k = 0; k < n; ++k)
{
r += a[i*n+k] * b[k*n+j];
}
y[i*n+j] = abs(r)<0.001?0: r;
}
}
}
void inputArray(float *a, int n){
for(int i=0;i<n*n;++i){
std::cin >> a[i];
}
}
void showArray(float *a, int n){
for(int i=0;i<n;++i){
for(int j=0;j<n;++j){
std::cout<<a[i*n+j]<<" ";
}
std::cout<<std::endl;
}
}
__global__ void mkide(float *a, int n){
int index = blockIdx.x * blockDim.x + threadIdx.x;
a[index*n+index] = 1;
}
__global__ void divRow(float *a, float *b, float *s, int n, int i){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index<n){
// printf("%d\n", index);
// printf("%f\n", b[i*n+index]);
float t = a[i*n+i];
a[i*n+index] /= t;
b[i*n+index] /= t;
s[index] = a[index * n + i];
// printf("%f %f\n", b[i*n+index], a[i*n+i]);
}
}
// __global__ void GaussElimination(float *a, float *b, int n, int i){
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if(index != i && index < n){
// float t = a[index*n+i];
// for(int k = 0; k < n; ++k){
// a[index*n+k] -= a[i*n+k]*t;
// b[index*n+k] -= b[i*n+k]*t;
// }
// }
// }
__global__ void GaussElimination(float *a, float *b, float *t, int n, int i){
//-- should a and b[i*n:i*n+k] go through cache or shared memory? -- t should probably use constant memory
// many threads here end up doing no work; needs rework
int col = blockIdx.x;
int row = blockIdx.y*blockDim.x + threadIdx.x;
// if(col >= n || row >= n || i == col || i == row) return;
if(row >= n || i == col) return;
// printf("%d %d %d\n", col, row, blockDim.x);
int index = col * n + row;
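// note: "col" (blockIdx.x) picks the matrix row being updated and "row" picks the element inside it,
// so index = col*n + row addresses entry (col, row) in row-major storage; t[col] is the pivot-column
// value a[col*n + i] that divRow stashed before this elimination step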
// printf("%d %f %f\n", index, a[index], t[col]);
// a[index*n+k] -= __ldg(a[i*n+k])*t[col];
// b[index*n+k] -= __ldg(b[i*n+k])*t[col];
a[index] -= a[i*n+row]*t[col];
b[index] -= b[i*n+row]*t[col];
}
__constant__ float t[10000];
__host__ void GaussJordanGpuOptimize(float *a, float *b, int n){
int blockSize = n/32 + (n%32 ? 1 : 0);
printf("blockSize %d\n",blockSize);
mkide<<<blockSize,32>>>(b, n);
cudaDeviceSynchronize();
// for(int i = 0; i < n; ++i){
// b[i*n+i] = 1;
// }
dim3 thread(32);
dim3 block(n, n/32 + (n%32 ? 1 : 0));
printf("%d, %d, %d\n", n, n/32 + (n%32 ? 1 : 0), 32);
float *s;
cudaMalloc(&s, sizeof(float)*n);
for(int i = 0;i<n; ++i){
int in = i*n;
// std::cout<<i<<" "<<in<<" "<<&a[in]<<" "<<&b[in]<<std::endl;
// std::cout<<thread.x<<" "<<thread.y<<" "<<thread.z<<std::endl;
// std::cout<<block.x<<" "<<block.y<<" "<<block.z<<std::endl;
divRow<<<blockSize, thread>>>(a, b, s, n, i);
cudaDeviceSynchronize();
// cudaMemcpyToSymbol(t, &a[in], n, cudaMemcpyDeviceToDevice);
GaussElimination<<<block, thread>>>(a, b, s, n, i);
CHECK(cudaDeviceSynchronize());
}
}
// __global__ void GaussJordanGpuOptimize(float *a, float *b, int n){
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if(index>=n)return;
// a[index*n+index] = 1;
// __syncthreads();
// for(int i = 0;i<n; ++i){
// int in = i*n;
// float t = a[in+i];
// a[in+index] /= t;
// b[in+index] /= t;
// __syncthreads();
// for(int j=0;j<n;++j){
// if(j != i){
// float t = a[j*n+i];
// a[j*n+index] -= a[in+index]*t;
// b[j*n+index] -= b[in+index]*t;
// }
// }
// __syncthreads();
// }
// }
__global__ void GaussJordanGpu(float *a, float *b, int n){
for(int i = 0; i < n; ++i){
b[i*n+i] = 1;
}
for(int i = 0; i < n; ++i){
float t = a[i*n+i];
for(int j = 0; j < n; ++j){
a[i*n+j] /= t;
b[i*n+j] /= t;
}
for(int j = 0; j < n; ++j){
if(i != j){
float t = a[j*n+i];
for(int k = 0; k < n; ++k){
a[j*n+k] -= a[i*n+k]*t;
b[j*n+k] -= b[i*n+k]*t;
}
}
}
}
}
void GaussJordan(float *a, float *b, int n){
for(int i = 0; i < n; ++i){
b[i*n+i] = 1;
}
for(int i = 0; i < n; ++i){
float t = a[i*n+i];
for(int j = 0; j < n; ++j){
a[i*n+j] /= t;
b[i*n+j] /= t;
}
for(int j = 0; j < n; ++j){
if(i != j){
float t = a[j*n+i];
for(int k = 0; k < n; ++k){
a[j*n+k] -= a[i*n+k]*t;
b[j*n+k] -= b[i*n+k]*t;
}
}
}
}
}
int main(){
int n = 1000;
// std::cin>>n;
std::chrono::system_clock::time_point start, end;
std::mt19937 mt(982359349);
std::uniform_real_distribution<> MyRand(-1.0, 1.0);
float *ref = (float*)calloc(n*n, sizeof(float));
for(int i = 0;i<n*n;i++){
ref[i] = MyRand(mt);
// std::cout<<ref[i]<<" ";
}
std::cout<<std::endl;
float *a, *b;
// *a = (float*)calloc(n*n, sizeof(float));
// *b = (float*)calloc(n*n, sizeof(float));
cudaMalloc(&a, n*n*sizeof(float));
cudaMalloc(&b, n*n*sizeof(float));
float *buf = (float*)calloc(n*n, sizeof(float));
float *a_ = (float*)calloc(n*n, sizeof(float));
float *b_ = (float*)calloc(n*n, sizeof(float));
float *y_ = (float*)calloc(n*n, sizeof(float));
// cudaMallocManaged(&a, n*n*sizeof(float));
// cudaMallocManaged(&b, n*n*sizeof(float));
for(int i = 0;i<n*n;i++){
a_[i] = MyRand(mt);
// std::cout<<ref[i]<<" ";
}
// float *a;
// CHECK(cudaMallocManaged(&a, n*n*sizeof(float)));
// float *b;
// CHECK(cudaMallocManaged(&b, n*n*sizeof(float)));
// for(int j = 10;j<=10000;j*=10){
// for(int i=1;i<10;++i){
// int N = j*i;
// int blockSize = N/32 + (N%32 ? 1 : 0);
// cudaMemcpy(ref, a, n*n*sizeof(float), cudaMemcpyHostToDevice);
// start = std::chrono::system_clock::now();
// // GaussJordan(a, b, N);
// GaussJordanGpuOptimize(a, b, N);
// // GaussJordanGpuOptimize<<<blockSize, 32>>>(a, b, N);
// cudaDeviceSynchronize();
// end = std::chrono::system_clock::now();
// double elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
// std::cout<<N<<"\t"<<elapsed<<std::endl;
// }
// }
// float *s;
// cudaMalloc(&s, sizeof(float)*n);
// dim3 thread(32);
// dim3 block(n, n/32 + (n%32 ? 1 : 0));
// cudaMemcpy(a, a_, n*n*sizeof(float), cudaMemcpyHostToDevice);
// GaussElimination<<<block, thread>>>(a, b, s, n, i);
// // std::cout<<"hoge"<<std::endl;
// // inputArray(a, n);
// // std::cout<<"hoge"<<std::endl;
cudaMemcpy(a, a_, n*n*sizeof(float), cudaMemcpyHostToDevice);
// showArray(a_, n);
GaussJordanGpuOptimize(a, b, n);
cudaDeviceSynchronize();
cudaMemcpy(buf, a, n*n*sizeof(float), cudaMemcpyDeviceToHost);
// showArray(buf, n);
cudaMemcpy(b_, b, n*n*sizeof(float), cudaMemcpyDeviceToHost);
// showArray(b_, n);
std::cout<<"meow"<<std::endl;
dot(a_, b_, y_, n);
double sum=0;
for(int i = 0;i<n*n;++i)sum+=buf[i];
std::cout<<sum<<std::endl;
sum = 0;
for(int i = 0;i<n*n;++i)sum+=y_[i];
std::cout<<sum<<std::endl;
// showArray(y_, n);
return 0;
} |
f0b0339307c59265c091f7146bd98cbaa61f6584.hip | // !!! This is a file automatically generated by hipify!!!
/* slip.cu
* GPU Benchmark Immersed Boundary Unstructured Grid
* Based on "The Method of Regularized Stokelets" by R.Cortez`
* Elastic force computed using energy-based formulation by Devendran + Peskin
* C.Copos 02/21/2012
*/
/* WHICH VERSION IS THIS? */
/* - velocity results are computed per thread block (256) exclusively in shared memory
* - works only up to 2048 mesh points
* - block number represents which node we compute velocity for
* - thread number (tx) within a block represents who contributes to velocity calculation
* - check against cpu & perfect agreement statically & dynamically (2/21/2013)
*/
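/* Assumed layout of SlipKernel (defined in test_kernel4.cu, not in this file), inferred from the
 * notes above and from how velocity() consumes tmpx/tmpy: each thread block builds a shared-memory
 * partial sum of regularized-Stokeslet contributions for a single target node, dim = Npts/256
 * partial sums are produced per node, and the host adds them together in the "Set velocity" loop. */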
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "test_kernel4.cu"
using namespace std;
const double visc = 1.0f;
const double drag = 1.0f;
const float def = 0.1;
// Lame constants
const double lambda = 1.0;
const double mu = 1.0; // shear modulus (G)
// time
const double TMAX = 0.1f;
const double tstep = 0.0001f;/*0.0001f;*/
static int tstop = floor(TMAX/tstep);
//static int tstop = 1;
// curve constants
const int N = 1024;
const double ri = 1.2f;
const double ds = ri/(N-1);
const double e = 1.2f*ds; // e: parameter determining width of blobs or cutoffs
// vector structure
typedef struct vector{
double x; // x-component
double y; // y-component
} vector;
// 2x2 matrix
typedef struct matrix{
double x1; // term (1,1)
double y1; // term (1,2)
double x2; // term (2,1)
double y2; // term (2,2)
} matrix;
// vertex
typedef struct vertex{
vector ref; // reference coords
vector def; // deformation coords
vector force; // force
int exterior; // 1 if this is a boundary point and 0 if this is an interior point
} vertex;
// triangle
typedef struct triangle{
int A;
int B;
int C;
double def_area; // deformed area
double ref_area; // reference area
matrix f1; // term 1 of the forcing calculation
matrix f2_0_x; // x-component of term 2 of the forcing calculation for vertex 0
matrix f2_0_y; // y-component of term 2 of the forcing calculation for vertex 0
matrix f2_1_x; // x-component of term 2 of the forcing calculation for vertex 1
matrix f2_1_y; // y-component of term 2 of the forcing calculation for vertex 1
matrix f2_2_x; // x-component of term 2 of the forcing calculation for vertex 2
matrix f2_2_y; // y-component of term 2 of the forcing calculation for vertex 2
double f3; // term 3 of the forcing calculation
} triangle;
// gpu timing
double gpuTime = 0.0f;
// Compute time difference in seconds
double diffclock(clock_t s, clock_t e) {
double diffticks = s-e;
double diffms = (diffticks)/CLOCKS_PER_SEC;
return diffms;
}
// Set up preliminary info per triangle (i.e. reference area or term 3 and term 2)
void ref_info(int Npts, triangle &tr, vertex nodes[]) {
// term 3 (otherwise known as reference area)
tr.ref_area = 0.5*fabs( (nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y) );
tr.f3 = tr.ref_area;
// determinant of S
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
// term 2
tr.f2_0_x.x1 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_x.y1 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_0_x.x2 = 0.0;
tr.f2_0_x.y2 = 0.0;
tr.f2_0_y.x1 = 0.0;
tr.f2_0_y.y1 = 0.0;
tr.f2_0_y.x2 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_y.y2 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_1_x.x1 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_x.y1 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_1_x.x2 = 0.0;
tr.f2_1_x.y2 = 0.0;
tr.f2_1_y.x1 = 0.0;
tr.f2_1_y.y1 = 0.0;
tr.f2_1_y.x2 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_y.y2 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_2_x.x1 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_x.y1 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
tr.f2_2_x.x2 = 0.0;
tr.f2_2_x.y2 = 0.0;
tr.f2_2_y.x1 = 0.0;
tr.f2_2_y.y1 = 0.0;
tr.f2_2_y.x2 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_y.y2 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
}
// Set up deformation specific info per triangle (i.e. deformed area and term 1)
void def_info(int Npts, triangle &tr, vertex nodes[]) {
// deformed area
tr.def_area = 0.5*fabs((nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].def.y-nodes[tr.A].def.y) - (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].def.x-nodes[tr.A].def.x) );
// deformation gradient tensor
matrix a;
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
a.x1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y) );
a.y1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.x-nodes[tr.C].ref.x) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.B].ref.x-nodes[tr.A].ref.x) );
a.x2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.y-nodes[tr.B].ref.y) );
a.y2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x) );
// inverse transpose of deformation gradient tensor (w/o outside normalizers i.e. determinants)
matrix ait;
ait.x1 = a.y2;
ait.y1 = (-1.0)*(a.x2);
ait.x2 = (-1.0)*(a.y1);
ait.y2 = a.x1;
// Cauchy stress tensor
matrix sigma;
// Material displacement gradient tensor ( = deformation gradient tensor - I)
matrix d;
d.x1 = a.x1 - 1.0;
d.y1 = a.y1;
d.x2 = a.x2;
d.y2 = a.y2 - 1.0;
sigma.x1 = lambda*(d.x1+d.y2) + 2.0*mu*d.x1;
sigma.y1 = mu*(d.y1+d.x2);
sigma.x2 = mu*(d.x2+d.y1);
sigma.y2 = lambda*(d.x1+d.y2) + 2.0*mu*d.y2;
// term 1 (otherwise known as 1st Piola-Kirchhoff tensor)
tr.f1.x1 = ( sigma.x1*ait.x1 + sigma.y1*ait.x2 );
tr.f1.y1 = ( sigma.x1*ait.y1 + sigma.y1*ait.y2 );
tr.f1.x2 = ( sigma.x2*ait.x1 + sigma.y2*ait.x2 );
tr.f1.y2 = ( sigma.x2*ait.y1 + sigma.y2*ait.y2 );
}
// Compute velocity vector for all points in the grid
void velocity(int Npts, int Ntris, vertex Nodes[], vector f[], vector v[]) {
int mem_size = Npts;
int dim = Npts/256; // number of thread blocks that make up a vector computation
// Allocate host memory for result (velocity)
double *vx = (double*) malloc(mem_size*dim*sizeof(double));
double *vy = (double*) malloc(mem_size*dim*sizeof(double));
// Allocate and fill host memory for force
double *fxh = (double*) malloc(mem_size*sizeof(double));
double *fyh = (double*) malloc(mem_size*sizeof(double));
for(int j=0; j<Npts; j++) {
fxh[j] = f[j].x;
fyh[j] = f[j].y;
}
// Allocate and fill host memory for position
double *xh = (double*) malloc(mem_size*sizeof(double));
double *yh = (double*) malloc(mem_size*sizeof(double));
for(int j=0; j<Npts; j++) {
xh[j] = Nodes[j].def.x;
yh[j] = Nodes[j].def.y;
}
// Allocate device memory for x, y, F, v, and G (where G is the Stokeslet matrix)
double *xd, *yd, *Fxd, *Fyd, *tmpx, *tmpy;
hipMalloc((void**) &xd, mem_size*sizeof(double));
hipMalloc((void**) &yd, mem_size*sizeof(double));
hipMalloc((void**) &Fxd, mem_size*sizeof(double));
hipMalloc((void**) &Fyd, mem_size*sizeof(double));
hipMalloc((void**) &tmpx, mem_size*dim*sizeof(double));
hipMalloc((void**) &tmpy, mem_size*dim*sizeof(double));
// Initialize device memory to zero
hipMemset(tmpx, 0x0, mem_size*dim*sizeof(double));
hipMemset(tmpy, 0x0, mem_size*dim*sizeof(double));
// Copy position and force arrays to allocated device memory locations
hipMemcpy(xd, xh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(yd, yh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Fxd, fxh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Fyd, fyh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
// Perform Stokeslet computation
dim3 threads(THREADS, 1);
dim3 grid(BLOCKS, 1);
double esq = e*e;
hipLaunchKernelGGL(( SlipKernel), dim3(grid), dim3(threads) , 0, 0, xd, yd, Fxd, Fyd, tmpx, tmpy, visc, e, esq);
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Copy the result from device to host
hipMemcpy(vx, tmpx, mem_size*dim*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy, tmpy, mem_size*dim*sizeof(double), hipMemcpyDeviceToHost);
/*for(int i=0; i<mem_size*dim; i++) {
printf("(vx, vy) = (%.16f,%.16f)\n", vx[i], vy[i]);
}*/
// Set velocity
for(int j=0; j<Npts*dim; j+=dim) {
int index = floor(j/dim);
v[index].x = 0.0;
v[index].y = 0.0;
for(int i=0; i<dim; i++) {
v[index].x += vx[j+i];
v[index].y += vy[j+i];
}
//printf("final vel: (%.16f, %.16f)\n", v[index].x, v[index].y);
}
// Report timing
hipEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
// Clean up
free(xh); free(yh);
free(fxh); free(fyh);
free(vx); free(vy);
hipFree(xd); hipFree(yd);
hipFree(Fxd); hipFree(Fyd);
hipFree(tmpx); hipFree(tmpy);
}
// Progression
void progress(int Npts, int Ntris, vertex Nodes[], triangle Tris[]) {
vector pos_init[Npts];
vector vel[Npts];
vector v[Npts];
vector f[Npts];
double ftime = 0.0f;
// file handling
ofstream f1, f2, f3;
f1.open("initial_pos_conf.txt"); f2.open("final_pos_conf.txt"); f3.open("ref_pos_conf.txt");
// print initial configuration (i.e. with initial deformation as described in startCurve() )
for(int i=0; i<Npts; i++) {
// zero the force
Nodes[i].force.x = 0.0;
Nodes[i].force.y = 0.0;
pos_init[i].x = Nodes[i].def.x;
pos_init[i].y = Nodes[i].def.y;
f1 << pos_init[i].x << " " << pos_init[i].y << endl;
f3 << Nodes[i].ref.x << " " << Nodes[i].ref.y << endl;
}
f1.close();
f3.close();
for(int t=0; t<tstop; t++) {
clock_t fbegin = clock();
float ref_Tarea = 0.0; float def_Tarea = 0.0;
// CYCLE THROUGH TRIANGLES AND COMPUTE FORCES
for(int j=0; j<Ntris; j++) {
def_info(Npts, Tris[j], Nodes);
// vertex A
Nodes[Tris[j].A].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_x.x1 + Tris[j].f1.y1*Tris[j].f2_0_x.y1 + Tris[j].f1.x2*Tris[j].f2_0_x.x2 + Tris[j].f1.y2*Tris[j].f2_0_x.y2);
Nodes[Tris[j].A].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_y.x1 + Tris[j].f1.y1*Tris[j].f2_0_y.y1 + Tris[j].f1.x2*Tris[j].f2_0_y.x2 + Tris[j].f1.y2*Tris[j].f2_0_y.y2);
// vertex B
Nodes[Tris[j].B].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_x.x1 + Tris[j].f1.y1*Tris[j].f2_1_x.y1 + Tris[j].f1.x2*Tris[j].f2_1_x.x2 + Tris[j].f1.y2*Tris[j].f2_1_x.y2);
Nodes[Tris[j].B].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_y.x1 + Tris[j].f1.y1*Tris[j].f2_1_y.y1 + Tris[j].f1.x2*Tris[j].f2_1_y.x2 + Tris[j].f1.y2*Tris[j].f2_1_y.y2);
// vertex C
Nodes[Tris[j].C].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_x.x1 + Tris[j].f1.y1*Tris[j].f2_2_x.y1 + Tris[j].f1.x2*Tris[j].f2_2_x.x2 + Tris[j].f1.y2*Tris[j].f2_2_x.y2);
Nodes[Tris[j].C].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_y.x1 + Tris[j].f1.y1*Tris[j].f2_2_y.y1 + Tris[j].f1.x2*Tris[j].f2_2_y.x2 + Tris[j].f1.y2*Tris[j].f2_2_y.y2);
ref_Tarea += Tris[j].ref_area;
def_Tarea += Tris[j].def_area;
}
clock_t fend = clock();
for(int k=0; k<Npts; k++) {
f[k].x = Nodes[k].force.x;
f[k].y = Nodes[k].force.y;
Nodes[k].force.x = 0.0;
Nodes[k].force.y = 0.0;
}
// compute velocity fields
velocity(Npts, Ntris, Nodes, f, vel);
// for each node in unstructured mesh
for(int i=0; i<Npts; i++) {
v[i].x = vel[i].x + f[i].x/drag;
v[i].y = vel[i].y + f[i].y/drag;
Nodes[i].def.x = Nodes[i].def.x + tstep*v[i].x;
Nodes[i].def.y = Nodes[i].def.y + tstep*v[i].y;
if(t==tstop-1) { f2 << Nodes[i].def.x << " " << Nodes[i].def.y << endl; }
}
double fpart = diffclock(fend, fbegin);
ftime = fpart + ftime;
}
f2.close();
// compute final area
printf("Total focing computation time (s): %.10f\n", ftime);
}
// Draw starting configuration
void startCurve() {
// file handling
ifstream f1;
ifstream f2;
ifstream f3;
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
// determine length
int Npoints = -1;
int Ntris = -1;
string c1;
string c2;
while( !f1.eof() ) {
getline(f1, c1);
Npoints++;
}
f1.close();
while( !f2.eof() ) {
getline(f2, c2);
Ntris++;
}
f2.close();
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
f3.open("data/UnitCircleBoundaryN1024.txt");
vector Nodes[Npoints];
triangle Tris[Ntris];
int Boundary[Npoints];
int counter = 0;
double d1, d2;
while(f1 >> d1 >> d2) {
Nodes[counter].x = d1;
Nodes[counter].y = d2;
counter++;
}
f1.close();
counter = 0;
int i1, i2, i3;
while(f2 >> i1 >> i2 >> i3) {
Tris[counter].A = i1-1;
Tris[counter].B = i2-1;
Tris[counter].C = i3-1;
counter++;
}
f2.close();
counter = 0;
int ext;
// set all points to interior points
for(int k=0; k<Npoints; k++) {
Boundary[k] = 0;
}
while(f3 >> ext) {
Boundary[ext-1] = 1;
counter++;
}
f3.close();
// output to array of vertices and array of triangles
vertex Points[Npoints];
for(int i=0; i<Npoints; i++) {
Points[i].ref.x = Nodes[i].x;
Points[i].ref.y = Nodes[i].y;
Points[i].exterior = Boundary[i];
// SPECIFY DEFORMATION HERE // Step 0: NO deformation
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 1: LINEAR deformation
///// expansion /////
Points[i].def.x = (1.0 - def)*Nodes[i].x;
Points[i].def.y = (1.0 - def)*Nodes[i].y;
///// shear /////
//Points[i].def.x = Nodes[i].x + lambda*Nodes[i].y;
//Points[i].def.y = Nodes[i].y;
///// vertical stretch /////
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = (1.0 + lambda)*Nodes[i].y;
///// uniaxial extension /////
//Points[i].def.x = lambda*Nodes[i].x;
//Points[i].def.y = (1.0/lambda)*Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 2: NONLINEAR deformation
//Points[i].def.x = lambda*Nodes[i].x*Nodes[i].x;
//Points[i].def.y = Points[i].def.y;
}
for(int j=0; j<Ntris; j++) {
// find vertices
int iA = Tris[j].A; // index of A vertex
int iB = Tris[j].B; // index of B vertex
int iC = Tris[j].C; // index of C vertex
Points[iA].ref.x = Nodes[iA].x;
Points[iA].ref.y = Nodes[iA].y;
Points[iB].ref.x = Nodes[iB].x;
Points[iB].ref.y = Nodes[iB].y;
Points[iC].ref.x = Nodes[iC].x;
Points[iC].ref.y = Nodes[iC].y;
}
for(int k=0; k<Ntris; k++) {
// find forcing terms that remain constant with any deformation and timestep
ref_info(Npoints, Tris[k], Points);
}
progress(Npoints, Ntris, Points, Tris);
}
// Main
int main(int argc, char **argv) {
clock_t begin = clock();
startCurve();
clock_t end = clock();
printf("GPU computation time (ms): %.10f \n", gpuTime);
printf("Total computation time (s): %.10f\n", double(diffclock(end,begin)));
return 0;
}
| f0b0339307c59265c091f7146bd98cbaa61f6584.cu | /* slip.cu
* GPU Benchmark Immersed Boundary Unstructured Grid
* Based on "The Method of Regularized Stokelets" by R.Cortez`
* Elastic force computed using energy-based formulation by Devendran + Peskin
* C.Copos 02/21/2012
*/
/* WHICH VERSION IS THIS? */
/* - velocity results are computed per thread block (256) exclusively in shared memory
* - works only up to 2048 mesh points
* - block number represents which node we compute velocity for
* - thread number (tx) within a block represents who contributes to velocity calculation
* - check against cpu & perfect agreement statically & dynamically (2/21/2013)
*/
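/* Assumed layout of SlipKernel (defined in test_kernel4.cu, not in this file), inferred from the
 * notes above and from how velocity() consumes tmpx/tmpy: each thread block builds a shared-memory
 * partial sum of regularized-Stokeslet contributions for a single target node, dim = Npts/256
 * partial sums are produced per node, and the host adds them together in the "Set velocity" loop. */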
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
#include "test_kernel4.cu"
using namespace std;
const double visc = 1.0f;
const double drag = 1.0f;
const float def = 0.1;
// Lame constants
const double lambda = 1.0;
const double mu = 1.0; // shear modulus (G)
// time
const double TMAX = 0.1f;
const double tstep = 0.0001f;/*0.0001f;*/
static int tstop = floor(TMAX/tstep);
//static int tstop = 1;
// curve constants
const int N = 1024;
const double ri = 1.2f;
const double ds = ri/(N-1);
const double e = 1.2f*ds; // e: parameter determining width of blobs or cutoffs
// vector structure
typedef struct vector{
double x; // x-component
double y; // y-component
} vector;
// 2x2 matrix
typedef struct matrix{
double x1; // term (1,1)
double y1; // term (1,2)
double x2; // term (2,1)
double y2; // term (2,2)
} matrix;
// vertex
typedef struct vertex{
vector ref; // reference coords
vector def; // deformation coords
vector force; // force
int exterior; // 1 if this is a boundary point and 0 if this is an interior point
} vertex;
// triangle
typedef struct triangle{
int A;
int B;
int C;
double def_area; // deformed area
double ref_area; // reference area
matrix f1; // term 1 of the forcing calculation
matrix f2_0_x; // x-component of term 2 of the forcing calculation for vertex 0
matrix f2_0_y; // y-component of term 2 of the forcing calculation for vertex 0
matrix f2_1_x; // x-component of term 2 of the forcing calculation for vertex 1
matrix f2_1_y; // y-component of term 2 of the forcing calculation for vertex 1
matrix f2_2_x; // x-component of term 2 of the forcing calculation for vertex 2
matrix f2_2_y; // y-component of term 2 of the forcing calculation for vertex 2
double f3; // term 3 of the forcing calculation
} triangle;
// gpu timing
double gpuTime = 0.0f;
// Compute time difference in seconds
double diffclock(clock_t s, clock_t e) {
double diffticks = s-e;
double diffms = (diffticks)/CLOCKS_PER_SEC;
return diffms;
}
// Set up preliminary info per triangle (i.e. reference area or term 3 and term 2)
void ref_info(int Npts, triangle &tr, vertex nodes[]) {
// term 3 (otherwise known as reference area)
tr.ref_area = 0.5*fabs( (nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y) );
tr.f3 = tr.ref_area;
// determinant of S
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
// term 2
tr.f2_0_x.x1 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_x.y1 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_0_x.x2 = 0.0;
tr.f2_0_x.y2 = 0.0;
tr.f2_0_y.x1 = 0.0;
tr.f2_0_y.y1 = 0.0;
tr.f2_0_y.x2 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_y.y2 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_1_x.x1 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_x.y1 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_1_x.x2 = 0.0;
tr.f2_1_x.y2 = 0.0;
tr.f2_1_y.x1 = 0.0;
tr.f2_1_y.y1 = 0.0;
tr.f2_1_y.x2 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_y.y2 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_2_x.x1 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_x.y1 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
tr.f2_2_x.x2 = 0.0;
tr.f2_2_x.y2 = 0.0;
tr.f2_2_y.x1 = 0.0;
tr.f2_2_y.y1 = 0.0;
tr.f2_2_y.x2 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_y.y2 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
}
// Set up deformation specific info per triangle (i.e. deformed area and term 1)
void def_info(int Npts, triangle &tr, vertex nodes[]) {
// deformed area
tr.def_area = 0.5*fabs((nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].def.y-nodes[tr.A].def.y) - (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].def.x-nodes[tr.A].def.x) );
// deformation gradient tensor
matrix a;
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
a.x1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y) );
a.y1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.x-nodes[tr.C].ref.x) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.B].ref.x-nodes[tr.A].ref.x) );
a.x2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.y-nodes[tr.B].ref.y) );
a.y2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x) );
// inverse transpose of deformation gradient tensor (w/o outside normalizers i.e. determinants)
matrix ait;
ait.x1 = a.y2;
ait.y1 = (-1.0)*(a.x2);
ait.x2 = (-1.0)*(a.y1);
ait.y2 = a.x1;
// Cauchy stress tensor
matrix sigma;
// Material displacement gradient tensor ( = deformation gradient tensor - I)
matrix d;
d.x1 = a.x1 - 1.0;
d.y1 = a.y1;
d.x2 = a.x2;
d.y2 = a.y2 - 1.0;
sigma.x1 = lambda*(d.x1+d.y2) + 2.0*mu*d.x1;
sigma.y1 = mu*(d.y1+d.x2);
sigma.x2 = mu*(d.x2+d.y1);
sigma.y2 = lambda*(d.x1+d.y2) + 2.0*mu*d.y2;
// term 1 (otherwise known as 1st Piola-Kirchhoff tensor)
tr.f1.x1 = ( sigma.x1*ait.x1 + sigma.y1*ait.x2 );
tr.f1.y1 = ( sigma.x1*ait.y1 + sigma.y1*ait.y2 );
tr.f1.x2 = ( sigma.x2*ait.x1 + sigma.y2*ait.x2 );
tr.f1.y2 = ( sigma.x2*ait.y1 + sigma.y2*ait.y2 );
}
// Compute velocity vector for all points in the grid
void velocity(int Npts, int Ntris, vertex Nodes[], vector f[], vector v[]) {
int mem_size = Npts;
int dim = Npts/256; // number of thread blocks that make up a vector computation
// Allocate host memory for result (velocity)
double *vx = (double*) malloc(mem_size*dim*sizeof(double));
double *vy = (double*) malloc(mem_size*dim*sizeof(double));
// Allocate and fill host memory for force
double *fxh = (double*) malloc(mem_size*sizeof(double));
double *fyh = (double*) malloc(mem_size*sizeof(double));
for(int j=0; j<Npts; j++) {
fxh[j] = f[j].x;
fyh[j] = f[j].y;
}
// Allocate and fill host memory for position
double *xh = (double*) malloc(mem_size*sizeof(double));
double *yh = (double*) malloc(mem_size*sizeof(double));
for(int j=0; j<Npts; j++) {
xh[j] = Nodes[j].def.x;
yh[j] = Nodes[j].def.y;
}
// Allocate device memory for x, y, F, v, and G (where G is the Stokeslet matrix)
double *xd, *yd, *Fxd, *Fyd, *tmpx, *tmpy;
cudaMalloc((void**) &xd, mem_size*sizeof(double));
cudaMalloc((void**) &yd, mem_size*sizeof(double));
cudaMalloc((void**) &Fxd, mem_size*sizeof(double));
cudaMalloc((void**) &Fyd, mem_size*sizeof(double));
cudaMalloc((void**) &tmpx, mem_size*dim*sizeof(double));
cudaMalloc((void**) &tmpy, mem_size*dim*sizeof(double));
// Initialize device memory to zero
cudaMemset(tmpx, 0x0, mem_size*dim*sizeof(double));
cudaMemset(tmpy, 0x0, mem_size*dim*sizeof(double));
// Copy position and force arrays to allocated device memory locations
cudaMemcpy(xd, xh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(yd, yh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Fxd, fxh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Fyd, fyh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
// Perform Stokeslet computation
dim3 threads(THREADS, 1);
dim3 grid(BLOCKS, 1);
double esq = e*e;
SlipKernel<<< grid, threads >>>(xd, yd, Fxd, Fyd, tmpx, tmpy, visc, e, esq);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Copy the result from device to host
cudaMemcpy(vx, tmpx, mem_size*dim*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy, tmpy, mem_size*dim*sizeof(double), cudaMemcpyDeviceToHost);
/*for(int i=0; i<mem_size*dim; i++) {
printf("(vx, vy) = (%.16f,%.16f)\n", vx[i], vy[i]);
}*/
// Set velocity
for(int j=0; j<Npts*dim; j+=dim) {
int index = floor(j/dim);
v[index].x = 0.0;
v[index].y = 0.0;
for(int i=0; i<dim; i++) {
v[index].x += vx[j+i];
v[index].y += vy[j+i];
}
//printf("final vel: (%.16f, %.16f)\n", v[index].x, v[index].y);
}
// Report timing
cudaEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
// Clean up
free(xh); free(yh);
free(fxh); free(fyh);
free(vx); free(vy);
cudaFree(xd); cudaFree(yd);
cudaFree(Fxd); cudaFree(Fyd);
cudaFree(tmpx); cudaFree(tmpy);
}
// Progression
void progress(int Npts, int Ntris, vertex Nodes[], triangle Tris[]) {
vector pos_init[Npts];
vector vel[Npts];
vector v[Npts];
vector f[Npts];
double ftime = 0.0f;
// file handling
ofstream f1, f2, f3;
f1.open("initial_pos_conf.txt"); f2.open("final_pos_conf.txt"); f3.open("ref_pos_conf.txt");
// print initial configuration (i.e. with initial deformation as described in startCurve() )
for(int i=0; i<Npts; i++) {
// zero the force
Nodes[i].force.x = 0.0;
Nodes[i].force.y = 0.0;
pos_init[i].x = Nodes[i].def.x;
pos_init[i].y = Nodes[i].def.y;
f1 << pos_init[i].x << " " << pos_init[i].y << endl;
f3 << Nodes[i].ref.x << " " << Nodes[i].ref.y << endl;
}
f1.close();
f3.close();
for(int t=0; t<tstop; t++) {
clock_t fbegin = clock();
float ref_Tarea = 0.0; float def_Tarea = 0.0;
// CYCLE THROUGH TRIANGLES AND COMPUTE FORCES
for(int j=0; j<Ntris; j++) {
def_info(Npts, Tris[j], Nodes);
// vertex A
Nodes[Tris[j].A].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_x.x1 + Tris[j].f1.y1*Tris[j].f2_0_x.y1 + Tris[j].f1.x2*Tris[j].f2_0_x.x2 + Tris[j].f1.y2*Tris[j].f2_0_x.y2);
Nodes[Tris[j].A].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_y.x1 + Tris[j].f1.y1*Tris[j].f2_0_y.y1 + Tris[j].f1.x2*Tris[j].f2_0_y.x2 + Tris[j].f1.y2*Tris[j].f2_0_y.y2);
// vertex B
Nodes[Tris[j].B].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_x.x1 + Tris[j].f1.y1*Tris[j].f2_1_x.y1 + Tris[j].f1.x2*Tris[j].f2_1_x.x2 + Tris[j].f1.y2*Tris[j].f2_1_x.y2);
Nodes[Tris[j].B].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_y.x1 + Tris[j].f1.y1*Tris[j].f2_1_y.y1 + Tris[j].f1.x2*Tris[j].f2_1_y.x2 + Tris[j].f1.y2*Tris[j].f2_1_y.y2);
// vertex C
Nodes[Tris[j].C].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_x.x1 + Tris[j].f1.y1*Tris[j].f2_2_x.y1 + Tris[j].f1.x2*Tris[j].f2_2_x.x2 + Tris[j].f1.y2*Tris[j].f2_2_x.y2);
Nodes[Tris[j].C].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_y.x1 + Tris[j].f1.y1*Tris[j].f2_2_y.y1 + Tris[j].f1.x2*Tris[j].f2_2_y.x2 + Tris[j].f1.y2*Tris[j].f2_2_y.y2);
ref_Tarea += Tris[j].ref_area;
def_Tarea += Tris[j].def_area;
}
clock_t fend = clock();
for(int k=0; k<Npts; k++) {
f[k].x = Nodes[k].force.x;
f[k].y = Nodes[k].force.y;
Nodes[k].force.x = 0.0;
Nodes[k].force.y = 0.0;
}
// compute velocity fields
velocity(Npts, Ntris, Nodes, f, vel);
// for each node in unstructured mesh
for(int i=0; i<Npts; i++) {
v[i].x = vel[i].x + f[i].x/drag;
v[i].y = vel[i].y + f[i].y/drag;
Nodes[i].def.x = Nodes[i].def.x + tstep*v[i].x;
Nodes[i].def.y = Nodes[i].def.y + tstep*v[i].y;
if(t==tstop-1) { f2 << Nodes[i].def.x << " " << Nodes[i].def.y << endl; }
}
double fpart = diffclock(fend, fbegin);
ftime = fpart + ftime;
}
f2.close();
// compute final area
printf("Total focing computation time (s): %.10f\n", ftime);
}
// Draw starting configuration
void startCurve() {
// file handling
ifstream f1;
ifstream f2;
ifstream f3;
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
// determine length
int Npoints = -1;
int Ntris = -1;
string c1;
string c2;
while( !f1.eof() ) {
getline(f1, c1);
Npoints++;
}
f1.close();
while( !f2.eof() ) {
getline(f2, c2);
Ntris++;
}
f2.close();
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
f3.open("data/UnitCircleBoundaryN1024.txt");
vector Nodes[Npoints];
triangle Tris[Ntris];
int Boundary[Npoints];
int counter = 0;
double d1, d2;
while(f1 >> d1 >> d2) {
Nodes[counter].x = d1;
Nodes[counter].y = d2;
counter++;
}
f1.close();
counter = 0;
int i1, i2, i3;
while(f2 >> i1 >> i2 >> i3) {
Tris[counter].A = i1-1;
Tris[counter].B = i2-1;
Tris[counter].C = i3-1;
counter++;
}
f2.close();
counter = 0;
int ext;
// set all points to interior points
for(int k=0; k<Npoints; k++) {
Boundary[k] = 0;
}
while(f3 >> ext) {
Boundary[ext-1] = 1;
counter++;
}
f3.close();
// output to array of vertices and array of triangles
vertex Points[Npoints];
for(int i=0; i<Npoints; i++) {
Points[i].ref.x = Nodes[i].x;
Points[i].ref.y = Nodes[i].y;
Points[i].exterior = Boundary[i];
// SPECIFY DEFORMATION HERE // Step 0: NO deformation
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 1: LINEAR deformation
///// expansion /////
Points[i].def.x = (1.0 - def)*Nodes[i].x;
Points[i].def.y = (1.0 - def)*Nodes[i].y;
///// shear /////
//Points[i].def.x = Nodes[i].x + lambda*Nodes[i].y;
//Points[i].def.y = Nodes[i].y;
///// vertical stretch /////
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = (1.0 + lambda)*Nodes[i].y;
///// uniaxial extension /////
//Points[i].def.x = lambda*Nodes[i].x;
//Points[i].def.y = (1.0/lambda)*Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 2: NONLINEAR deformation
//Points[i].def.x = lambda*Nodes[i].x*Nodes[i].x;
//Points[i].def.y = Points[i].def.y;
}
for(int j=0; j<Ntris; j++) {
// find vertices
int iA = Tris[j].A; // index of A vertex
int iB = Tris[j].B; // index of B vertex
int iC = Tris[j].C; // index of C vertex
Points[iA].ref.x = Nodes[iA].x;
Points[iA].ref.y = Nodes[iA].y;
Points[iB].ref.x = Nodes[iB].x;
Points[iB].ref.y = Nodes[iB].y;
Points[iC].ref.x = Nodes[iC].x;
Points[iC].ref.y = Nodes[iC].y;
}
for(int k=0; k<Ntris; k++) {
// find forcing terms that remain constant with any deformation and timestep
ref_info(Npoints, Tris[k], Points);
}
progress(Npoints, Ntris, Points, Tris);
}
// Main
int main(int argc, char **argv) {
clock_t begin = clock();
startCurve();
clock_t end = clock();
printf("GPU computation time (ms): %.10f \n", gpuTime);
printf("Total computation time (s): %.10f\n", double(diffclock(end,begin)));
return 0;
}
|
90f013f71a48f0b3b1d6a77c59c518aeb0b86dc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "../gpufit.h"
#include <vector>
#include <random>
#include <iostream>
#include <chrono>
#include <numeric>
#include <math.h>
#include <hip/hip_runtime.h>
#include <stdexcept>
#define CUDA_CHECK_STATUS( cuda_function_call ) \
if (hipError_t const status = cuda_function_call) \
{ \
throw std::runtime_error( hipGetErrorString( status ) ) ; \
}
template<class T>
struct GPU_array
{
GPU_array(std::size_t const size)
{
CUDA_CHECK_STATUS(hipMalloc(&data_, size * sizeof(T)));
}
GPU_array(std::vector<T> const & cpu_data) : GPU_array(cpu_data.size())
{
write(cpu_data);
}
GPU_array(std::size_t const & count, T const value) : GPU_array(count)
{
set(count, value);
}
~GPU_array() { hipFree(data_); }
operator T * () { return static_cast<T *>(data_); }
void read(std::vector<T> & to) const
{
CUDA_CHECK_STATUS(hipMemcpy(
to.data(), data_, to.size() * sizeof(T), hipMemcpyDeviceToHost));
}
void write(std::vector<T> const & from)
{
CUDA_CHECK_STATUS(hipMemcpy(
data_, from.data(), from.size() * sizeof(T), hipMemcpyHostToDevice));
}
void set(std::size_t const count, T const value)
{
// fill on the host and copy across so every element really becomes `value`
// (a byte-wise memset cannot write an arbitrary REAL such as 1.0f)
std::vector<T> const temp(count, value);
write(temp);
}
private:
void * data_;
};
void generate_gauss_2d(
std::vector<REAL> const & x_coordinates,
std::vector<REAL> const & y_coordinates,
std::vector<REAL> const & gauss_params,
std::vector<REAL> & output_values)
{
// Generates a Gaussian 2D function at a set of X and Y coordinates. The Gaussian is defined by
// an array of five parameters.
// x_coordinates: Vector of X coordinates.
// y_coordinates: Vector of Y coordinates.
// gauss_params: Vector of function parameters.
// output_values: Output vector containing the values of the Gaussian function at the
// corresponding X, Y coordinates.
// gauss_params[0]: Amplitude
// gauss_params[1]: Center X position
// guass_params[2]: Center Y position
// gauss_params[3]: Gaussian width (standard deviation)
// gauss_params[4]: Baseline offset
// This code assumes that x_coordinates.size == y_coordinates.size == output_values.size
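// Equivalently, the model evaluated below is
// f(x, y) = p0 * exp( -((x - p1)^2 + (y - p2)^2) / (2 * p3^2) ) + p4, with p = gauss_params.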
for (size_t i = 0; i < x_coordinates.size(); i++)
{
REAL arg = -( (x_coordinates[i] - gauss_params[1]) * (x_coordinates[i] - gauss_params[1])
+ (y_coordinates[i] - gauss_params[2]) * (y_coordinates[i] - gauss_params[2]) )
/ (2 * gauss_params[3] * gauss_params[3]);
output_values[i] = gauss_params[0] * exp(arg) + gauss_params[4];
}
}
void cuda_interface_example()
{
/*
This example generates test data on the CPU in form of 10000 two dimensional
Gaussian peaks with the size of 50x50 data points per peak. It is noised by Poisson
distributed noise. The initial guesses were randomized, within a specified range
of the true value. Before the call to Gpufit the input data is transferred to GPU memory.
The GAUSS_2D model is fitted to the test data sets using the MLE estimator. After
calling Gpufit the output data is transferred to CPU memory.
The console output shows
- the execution time,
- the ratio of converged fits including ratios of not converged fits for
different reasons,
- the values of the true parameters and the mean values of the fitted
parameters including their standard deviation,
- the mean chi square value
- and the mean number of iterations needed.
True parameters and noise and number of fits is the same as for the Matlab/Python 2D Gaussian examples.
*/
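// Rough flow of this example: build noisy Gaussian test data and randomized initial guesses on the
// host, stage them in GPU_array buffers, call gpufit_cuda_interface() on the device pointers, then
// read states, parameters, chi-squares and iteration counts back and print summary statistics.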
// number of fits, fit points and parameters
size_t const n_fits = 10000;
size_t const size_x = 50;
size_t const n_points_per_fit = size_x * size_x;
size_t const n_parameters = 5;
// true parameters (amplitude, center x position, center y position, width, offset)
std::vector< REAL > true_parameters{ 10, 14.5f, 14.5f, 3, 10};
std::cout << "generate example data" << std::endl;
// initialize random number generator
std::mt19937 rng;
rng.seed(0);
std::uniform_real_distribution< REAL> uniform_dist(0, 1);
// initial parameters (randomized)
std::vector< REAL > initial_parameters(n_fits * n_parameters);
for (size_t i = 0; i < n_fits; i++)
{
for (size_t j = 0; j < n_parameters; j++)
{
if (j == 1 || j == 2)
{
initial_parameters[i * n_parameters + j]
= true_parameters[j] + true_parameters[3]
* (-.2f + .4f * uniform_dist(rng));
}
else
{
initial_parameters[i * n_parameters + j]
= true_parameters[j] * (.8f + .4f * uniform_dist(rng));
}
}
}
// generate x and y values
std::vector< REAL > x(n_points_per_fit);
std::vector< REAL > y(n_points_per_fit);
for (size_t i = 0; i < size_x; i++)
{
for (size_t j = 0; j < size_x; j++) {
x[i * size_x + j] = static_cast<REAL>(j);
y[i * size_x + j] = static_cast<REAL>(i);
}
}
// generate test data with Poisson noise
std::vector< REAL > temp(n_points_per_fit);
generate_gauss_2d(x, y, true_parameters, temp);
std::vector< REAL > data(n_fits * n_points_per_fit);
for (size_t i = 0; i < n_fits; i++)
{
for (size_t j = 0; j < n_points_per_fit; j++)
{
std::poisson_distribution< int > poisson_dist(temp[j]);
data[i * n_points_per_fit + j] = static_cast<REAL>(poisson_dist(rng));
}
}
// tolerance
REAL const tolerance = .001f;
// maximum number of iterations
int const max_n_iterations = 20;
// estimator ID
int const estimator_id = MLE;
// model ID
int const model_id = GAUSS_2D;
// parameters to fit (all of them)
std::vector< int > parameters_to_fit(n_parameters, 1);
// output parameters CPU
std::vector< REAL > output_parameters(n_fits * n_parameters);
std::vector< int > output_states(n_fits);
std::vector< REAL > output_chi_squares(n_fits);
std::vector< int > output_n_iterations(n_fits);
// input parameters GPU
GPU_array<REAL> gpu_data(data);
GPU_array<REAL> gpu_weights(data.size(), 1);
// input/output parameters GPU
GPU_array<REAL> gpu_initial_parameters(initial_parameters);
// output_parameters GPU
GPU_array<int> gpu_states(n_fits);
GPU_array<REAL> gpu_chi_squares(n_fits);
GPU_array<int> gpu_n_iterations(n_fits);
// call to gpufit_cuda_interface
std::chrono::high_resolution_clock::time_point time_0 = std::chrono::high_resolution_clock::now();
int const status = gpufit_cuda_interface
(
n_fits,
n_points_per_fit,
gpu_data,
gpu_weights,
model_id,
tolerance,
max_n_iterations,
parameters_to_fit.data(),
estimator_id,
0,
0,
gpu_initial_parameters,
gpu_states,
gpu_chi_squares,
gpu_n_iterations
);
std::chrono::high_resolution_clock::time_point time_1 = std::chrono::high_resolution_clock::now();
// check status
if (status != ReturnState::OK)
{
throw std::runtime_error(gpufit_get_last_error());
}
// copy output data to CPU memory
gpu_initial_parameters.read(output_parameters);
gpu_states.read(output_states);
gpu_chi_squares.read(output_chi_squares);
gpu_n_iterations.read(output_n_iterations);
// print execution time
std::cout << "execution time "
<< std::chrono::duration_cast<std::chrono::milliseconds>(time_1 - time_0).count() << " ms" << std::endl;
// get fit states
std::vector< int > output_states_histogram(5, 0);
for (std::vector< int >::iterator it = output_states.begin(); it != output_states.end(); ++it)
{
output_states_histogram[*it]++;
}
std::cout << "ratio converged " << (REAL)output_states_histogram[0] / n_fits << "\n";
std::cout << "ratio max iteration exceeded " << (REAL)output_states_histogram[1] / n_fits << "\n";
std::cout << "ratio singular hessian " << (REAL)output_states_histogram[2] / n_fits << "\n";
std::cout << "ratio neg curvature MLE " << (REAL)output_states_histogram[3] / n_fits << "\n";
std::cout << "ratio gpu not read " << (REAL)output_states_histogram[4] / n_fits << "\n";
// compute mean of fitted parameters for converged fits
std::vector< REAL > output_parameters_mean(n_parameters, 0);
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_mean[j] += output_parameters[i * n_parameters + j];
}
}
}
// normalize
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_mean[j] /= output_states_histogram[0];
}
// compute std of fitted parameters for converged fits
std::vector< REAL > output_parameters_std(n_parameters, 0);
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_std[j]
+= (output_parameters[i * n_parameters + j] - output_parameters_mean[j])
* (output_parameters[i * n_parameters + j] - output_parameters_mean[j]);
}
}
}
// normalize and take square root
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_std[j] = sqrt(output_parameters_std[j] / output_states_histogram[0]);
}
// print true value, fitted mean and std for every parameter
for (size_t j = 0; j < n_parameters; j++)
{
std::cout
<< "parameter " << j
<< " true " << true_parameters[j]
<< " fitted mean " << output_parameters_mean[j]
<< " std " << output_parameters_std[j] << std::endl;
}
// compute mean chi-square for those converged
REAL output_chi_square_mean = 0;
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
output_chi_square_mean += output_chi_squares[i];
}
}
output_chi_square_mean /= static_cast<REAL>(output_states_histogram[0]);
std::cout << "mean chi square " << output_chi_square_mean << std::endl;
// compute mean number of iterations for those converged
REAL output_number_iterations_mean = 0;
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
output_number_iterations_mean += static_cast<REAL>(output_n_iterations[i]);
}
}
// normalize
output_number_iterations_mean /= static_cast<REAL>(output_states_histogram[0]);
std::cout << "mean number of iterations " << output_number_iterations_mean << std::endl;
}
int main(int argc, char *argv[])
{
cuda_interface_example();
std::cout << std::endl << "Example completed!" << std::endl;
std::cout << "Press ENTER to exit" << std::endl;
std::getchar();
return 0;
}
| 90f013f71a48f0b3b1d6a77c59c518aeb0b86dc2.cu | #include "../gpufit.h"
#include <vector>
#include <random>
#include <iostream>
#include <chrono>
#include <numeric>
#include <math.h>
#include <cuda_runtime.h>
#include <stdexcept>
#define CUDA_CHECK_STATUS( cuda_function_call ) \
if (cudaError_t const status = cuda_function_call) \
{ \
throw std::runtime_error( cudaGetErrorString( status ) ) ; \
}
template<class T>
struct GPU_array
{
GPU_array(std::size_t const size)
{
CUDA_CHECK_STATUS(cudaMalloc(&data_, size * sizeof(T)));
}
GPU_array(std::vector<T> const & cpu_data) : GPU_array(cpu_data.size())
{
write(cpu_data);
}
GPU_array(std::size_t const & count, T const value) : GPU_array(count)
{
set(count, value);
}
~GPU_array() { cudaFree(data_); }
operator T * () { return static_cast<T *>(data_); }
void read(std::vector<T> & to) const
{
CUDA_CHECK_STATUS(cudaMemcpy(
to.data(), data_, to.size() * sizeof(T), cudaMemcpyDeviceToHost));
}
void write(std::vector<T> const & from)
{
CUDA_CHECK_STATUS(cudaMemcpy(
data_, from.data(), from.size() * sizeof(T), cudaMemcpyHostToDevice));
}
void set(std::size_t const count, T const value)
{
// fill on the host and copy across so every element really becomes `value`
// (a byte-wise memset cannot write an arbitrary REAL such as 1.0f)
std::vector<T> const temp(count, value);
write(temp);
}
private:
void * data_;
};
void generate_gauss_2d(
std::vector<REAL> const & x_coordinates,
std::vector<REAL> const & y_coordinates,
std::vector<REAL> const & gauss_params,
std::vector<REAL> & output_values)
{
// Generates a Gaussian 2D function at a set of X and Y coordinates. The Gaussian is defined by
// an array of five parameters.
// x_coordinates: Vector of X coordinates.
// y_coordinates: Vector of Y coordinates.
// gauss_params: Vector of function parameters.
// output_values: Output vector containing the values of the Gaussian function at the
// corresponding X, Y coordinates.
// gauss_params[0]: Amplitude
// gauss_params[1]: Center X position
// guass_params[2]: Center Y position
// gauss_params[3]: Gaussian width (standard deviation)
// gauss_params[4]: Baseline offset
// This code assumes that x_coordinates.size == y_coordinates.size == output_values.size
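// Equivalently, the model evaluated below is
// f(x, y) = p0 * exp( -((x - p1)^2 + (y - p2)^2) / (2 * p3^2) ) + p4, with p = gauss_params.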
for (size_t i = 0; i < x_coordinates.size(); i++)
{
REAL arg = -( (x_coordinates[i] - gauss_params[1]) * (x_coordinates[i] - gauss_params[1])
+ (y_coordinates[i] - gauss_params[2]) * (y_coordinates[i] - gauss_params[2]) )
/ (2 * gauss_params[3] * gauss_params[3]);
output_values[i] = gauss_params[0] * exp(arg) + gauss_params[4];
}
}
void cuda_interface_example()
{
/*
This example generates test data on the CPU in form of 10000 two dimensional
Gaussian peaks with the size of 50x50 data points per peak. It is noised by Poisson
distributed noise. The initial guesses were randomized, within a specified range
of the true value. Before the call to Gpufit the input data is transferred to GPU memory.
The GAUSS_2D model is fitted to the test data sets using the MLE estimator. After
calling Gpufit the output data is transferred to CPU memory.
The console output shows
- the execution time,
- the ratio of converged fits including ratios of not converged fits for
different reasons,
- the values of the true parameters and the mean values of the fitted
parameters including their standard deviation,
- the mean chi square value
- and the mean number of iterations needed.
True parameters and noise and number of fits is the same as for the Matlab/Python 2D Gaussian examples.
*/
// number of fits, fit points and parameters
size_t const n_fits = 10000;
size_t const size_x = 50;
size_t const n_points_per_fit = size_x * size_x;
size_t const n_parameters = 5;
// true parameters (amplitude, center x position, center y position, width, offset)
std::vector< REAL > true_parameters{ 10, 14.5f, 14.5f, 3, 10};
std::cout << "generate example data" << std::endl;
// initialize random number generator
std::mt19937 rng;
rng.seed(0);
std::uniform_real_distribution< REAL> uniform_dist(0, 1);
// initial parameters (randomized)
std::vector< REAL > initial_parameters(n_fits * n_parameters);
for (size_t i = 0; i < n_fits; i++)
{
for (size_t j = 0; j < n_parameters; j++)
{
if (j == 1 || j == 2)
{
initial_parameters[i * n_parameters + j]
= true_parameters[j] + true_parameters[3]
* (-.2f + .4f * uniform_dist(rng));
}
else
{
initial_parameters[i * n_parameters + j]
= true_parameters[j] * (.8f + .4f * uniform_dist(rng));
}
}
}
// generate x and y values
std::vector< REAL > x(n_points_per_fit);
std::vector< REAL > y(n_points_per_fit);
for (size_t i = 0; i < size_x; i++)
{
for (size_t j = 0; j < size_x; j++) {
x[i * size_x + j] = static_cast<REAL>(j);
y[i * size_x + j] = static_cast<REAL>(i);
}
}
// generate test data with Poisson noise
std::vector< REAL > temp(n_points_per_fit);
generate_gauss_2d(x, y, true_parameters, temp);
std::vector< REAL > data(n_fits * n_points_per_fit);
for (size_t i = 0; i < n_fits; i++)
{
for (size_t j = 0; j < n_points_per_fit; j++)
{
std::poisson_distribution< int > poisson_dist(temp[j]);
data[i * n_points_per_fit + j] = static_cast<REAL>(poisson_dist(rng));
}
}
// tolerance
REAL const tolerance = .001f;
// maximum number of iterations
int const max_n_iterations = 20;
// estimator ID
int const estimator_id = MLE;
// model ID
int const model_id = GAUSS_2D;
// parameters to fit (all of them)
std::vector< int > parameters_to_fit(n_parameters, 1);
// output parameters CPU
std::vector< REAL > output_parameters(n_fits * n_parameters);
std::vector< int > output_states(n_fits);
std::vector< REAL > output_chi_squares(n_fits);
std::vector< int > output_n_iterations(n_fits);
// input parameters GPU
GPU_array<REAL> gpu_data(data);
GPU_array<REAL> gpu_weights(data.size(), 1);
// input/output parameters GPU
GPU_array<REAL> gpu_initial_parameters(initial_parameters);
// output_parameters GPU
GPU_array<int> gpu_states(n_fits);
GPU_array<REAL> gpu_chi_squares(n_fits);
GPU_array<int> gpu_n_iterations(n_fits);
// call to gpufit_cuda_interface
std::chrono::high_resolution_clock::time_point time_0 = std::chrono::high_resolution_clock::now();
int const status = gpufit_cuda_interface
(
n_fits,
n_points_per_fit,
gpu_data,
gpu_weights,
model_id,
tolerance,
max_n_iterations,
parameters_to_fit.data(),
estimator_id,
0,
0,
gpu_initial_parameters,
gpu_states,
gpu_chi_squares,
gpu_n_iterations
);
std::chrono::high_resolution_clock::time_point time_1 = std::chrono::high_resolution_clock::now();
// check status
if (status != ReturnState::OK)
{
throw std::runtime_error(gpufit_get_last_error());
}
// copy output data to CPU memory
gpu_initial_parameters.read(output_parameters);
gpu_states.read(output_states);
gpu_chi_squares.read(output_chi_squares);
gpu_n_iterations.read(output_n_iterations);
// print execution time
std::cout << "execution time "
<< std::chrono::duration_cast<std::chrono::milliseconds>(time_1 - time_0).count() << " ms" << std::endl;
// get fit states
std::vector< int > output_states_histogram(5, 0);
for (std::vector< int >::iterator it = output_states.begin(); it != output_states.end(); ++it)
{
output_states_histogram[*it]++;
}
std::cout << "ratio converged " << (REAL)output_states_histogram[0] / n_fits << "\n";
std::cout << "ratio max iteration exceeded " << (REAL)output_states_histogram[1] / n_fits << "\n";
std::cout << "ratio singular hessian " << (REAL)output_states_histogram[2] / n_fits << "\n";
std::cout << "ratio neg curvature MLE " << (REAL)output_states_histogram[3] / n_fits << "\n";
std::cout << "ratio gpu not read " << (REAL)output_states_histogram[4] / n_fits << "\n";
// compute mean of fitted parameters for converged fits
std::vector< REAL > output_parameters_mean(n_parameters, 0);
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_mean[j] += output_parameters[i * n_parameters + j];
}
}
}
// normalize
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_mean[j] /= output_states_histogram[0];
}
// compute std of fitted parameters for converged fits
std::vector< REAL > output_parameters_std(n_parameters, 0);
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_std[j]
+= (output_parameters[i * n_parameters + j] - output_parameters_mean[j])
* (output_parameters[i * n_parameters + j] - output_parameters_mean[j]);
}
}
}
// normalize and take square root
for (size_t j = 0; j < n_parameters; j++)
{
output_parameters_std[j] = sqrt(output_parameters_std[j] / output_states_histogram[0]);
}
// print true value, fitted mean and std for every parameter
for (size_t j = 0; j < n_parameters; j++)
{
std::cout
<< "parameter " << j
<< " true " << true_parameters[j]
<< " fitted mean " << output_parameters_mean[j]
<< " std " << output_parameters_std[j] << std::endl;
}
// compute mean chi-square for those converged
REAL output_chi_square_mean = 0;
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
output_chi_square_mean += output_chi_squares[i];
}
}
output_chi_square_mean /= static_cast<REAL>(output_states_histogram[0]);
std::cout << "mean chi square " << output_chi_square_mean << std::endl;
// compute mean number of iterations for those converged
REAL output_number_iterations_mean = 0;
for (size_t i = 0; i != n_fits; i++)
{
if (output_states[i] == FitState::CONVERGED)
{
output_number_iterations_mean += static_cast<REAL>(output_n_iterations[i]);
}
}
// normalize
output_number_iterations_mean /= static_cast<REAL>(output_states_histogram[0]);
std::cout << "mean number of iterations " << output_number_iterations_mean << std::endl;
}
int main(int argc, char *argv[])
{
cuda_interface_example();
std::cout << std::endl << "Example completed!" << std::endl;
std::cout << "Press ENTER to exit" << std::endl;
std::getchar();
return 0;
}
|
double_itself.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void double_itself(int* A) {
int tid = threadIdx.x;
A[tid] += A[tid];
}
| double_itself.cu | __global__ void double_itself(int* A) {
int tid = threadIdx.x;
A[tid] += A[tid];
}
|
8c7f9dca5c58814e5278ae0ec5bf853fab0cdb9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/distributed_worklist.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include "sssp_common.h"
const distance_t INF = UINT_MAX;
DEFINE_int32(source_node, 0, "The source node for the SSSP traversal (clamped to [0, nnodes-1])");
DECLARE_bool(nf);
DEFINE_int32(nf_delta, 10000, "The delta for SSSP-nf");
#define GTID (blockIdx.x * blockDim.x + threadIdx.x)
namespace sssp
{
struct DistanceData
{
index_t node;
distance_t distance;
__device__ __host__ __forceinline__ DistanceData(index_t node, distance_t distance) : node(node), distance(distance) { }
__device__ __host__ __forceinline__ DistanceData() : node(INF), distance(INF) { }
};
typedef index_t local_work_t;
typedef DistanceData remote_work_t;
struct WorkTargetRemoteWorklist
{
private:
groute::dev::CircularWorklist<remote_work_t> m_worklist;
public:
WorkTargetRemoteWorklist(groute::CircularWorklist<remote_work_t>& worklist) : m_worklist(worklist.DeviceObject()) { }
__device__ __forceinline__ void append_work(const remote_work_t& work)
{
m_worklist.append_warp(work);
}
};
struct WorkTargetDummy
{
public:
WorkTargetDummy() { }
__device__ __forceinline__ void append_work(const remote_work_t& work)
{
}
};
struct WorkTargetRemoteMark
{
private:
groute::graphs::dev::GraphDatum<mark_t> m_remote_marks;
groute::dev::Counter m_remote_counter;
public:
WorkTargetRemoteMark(
groute::graphs::dev::GraphDatum<mark_t> remote_marks,
groute::Counter& remote_counter) :
m_remote_marks(remote_marks), m_remote_counter(remote_counter.DeviceObject())
{
}
__device__ __forceinline__ void append_work(const remote_work_t& work)
{
if (m_remote_marks[work.node] == 0)
{
m_remote_marks[work.node] = 1; // mark
m_remote_counter.add_one_warp();
}
}
};
__global__ void SSSPInit(distance_t* distances, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
distances[tid] = INF;
}
}
__global__ void SSSPInit(distance_t* distances, int nnodes, index_t source)
{
int tid = GTID;
if (tid < nnodes)
{
distances[tid] = tid == source ? 0 : INF;
}
}
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel__NestedParallelism__(
TGraph graph,
TWeightDatum edge_weights, TDistanceDatum node_distances,
WorkSource work_source, WorkTarget work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<distance_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = node_distances.get_item(node);
}
groute::dev::CTAWorkScheduler<distance_t>::template schedule(
np_local,
[&graph, &edge_weights, &node_distances, &work_target](index_t edge, distance_t distance)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
work_target.append_work(graph, dest);
}
}
);
}
}
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel(
TGraph graph,
TWeightDatum edge_weights, TDistanceDatum node_distances,
WorkSource work_source, WorkTarget work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
distance_t distance = node_distances.get_item(node);
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
work_target.append_work(graph, dest);
}
}
}
}
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel__NF__NestedParallelism__(
TGraph graph,
TWeightDatum<distance_t> edge_weights, TDistanceDatum<distance_t> node_distances,
int delta,
WorkSource work_source,
groute::dev::Worklist<index_t> near_worklist, groute::dev::Worklist<index_t> far_worklist,
WorkTarget remote_work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<distance_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = node_distances.get_item(node);
}
groute::dev::CTAWorkScheduler<distance_t>::template schedule(
np_local,
[&graph, &edge_weights, &node_distances, &near_worklist, &far_worklist, &remote_work_target, delta](index_t edge, distance_t distance)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
if (graph.owns(dest))
{
int near_mask = __ballot(distance + weight <= delta ? 1 : 0);
int far_mask = __ballot(distance + weight <= delta ? 0 : 1);
if (distance + weight <= delta)
{
int near_leader = __ffs(near_mask) - 1;
int thread_offset = __popc(near_mask & ((1 << lane_id()) - 1));
near_worklist.append_warp(dest, near_leader, __popc(near_mask), thread_offset);
}
else
{
int far_leader = __ffs(far_mask) - 1;
int thread_offset = __popc(far_mask & ((1 << lane_id()) - 1));
far_worklist.append_warp(dest, far_leader, __popc(far_mask), thread_offset);
}
}
else
{
remote_work_target.append_work(DistanceData(dest, distance + weight));
}
}
}
);
}
}
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel__NF__(
TGraph graph,
TWeightDatum<distance_t> edge_weights, TDistanceDatum<distance_t> node_distances,
int delta,
WorkSource work_source,
groute::dev::Worklist<index_t> near_worklist, groute::dev::Worklist<index_t> far_worklist,
WorkTarget remote_work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
distance_t distance = node_distances.get_item(node);
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
if (graph.owns(dest))
{
int near_mask = __ballot(distance + weight <= delta ? 1 : 0);
int far_mask = __ballot(distance + weight <= delta ? 0 : 1);
if (distance + weight <= delta)
{
int near_leader = __ffs(near_mask) - 1;
int thread_offset = __popc(near_mask & ((1 << lane_id()) - 1));
near_worklist.append_warp(dest, near_leader, __popc(near_mask), thread_offset);
}
else
{
int far_leader = __ffs(far_mask) - 1;
int thread_offset = __popc(far_mask & ((1 << lane_id()) - 1));
far_worklist.append_warp(dest, far_leader, __popc(far_mask), thread_offset);
}
}
else
{
remote_work_target.append_work(DistanceData(dest, distance + weight));
}
}
}
}
}
template<typename WorkSource>
__global__ void SSSPNearFarSplit__NF__(
int delta,
WorkSource work_source,
groute::graphs::dev::GraphDatum<distance_t> node_distances,
groute::dev::Worklist<index_t> near_worklist, groute::dev::Worklist<index_t> far_worklist)
{
int tid = GTID;
uint32_t work_size = work_source.get_size();
if (tid < work_size)
{
index_t node = work_source.get_work(tid);
distance_t distance = node_distances.get_item(node);
// warp-aggregated append: __ballot builds a mask of the lanes taking each branch,
// __ffs picks the lowest participating lane as the leader that allocates space for
// the whole warp, and __popc over the lower lanes gives each thread its slot offset
int near_mask = __ballot(distance <= delta ? 1 : 0);
int far_mask = __ballot(distance <= delta ? 0 : 1);
if (distance <= delta)
{
int near_leader = __ffs(near_mask) - 1;
int thread_offset = __popc(near_mask & ((1 << lane_id()) - 1));
near_worklist.append_warp(node, near_leader, __popc(near_mask), thread_offset);
}
else
{
int far_leader = __ffs(far_mask) - 1;
int thread_offset = __popc(far_mask & ((1 << lane_id()) - 1));
far_worklist.append_warp(node, far_leader, __popc(far_mask), thread_offset);
}
}
}
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<distance_t> m_distances_datum;
public:
template<typename...UnusedData>
SplitOps(
const groute::graphs::dev::CSRGraphSeg& graph_seg,
const groute::graphs::dev::GraphDatumSeg<distance_t>& weights_datum,
const groute::graphs::dev::GraphDatum<distance_t>& distances_datum,
UnusedData&... data)
: m_graph_seg(graph_seg), m_distances_datum(distances_datum)
{
}
__device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.distance < atomicMin(m_distances_datum.get_item_ptr(work.node), work.distance))
? groute::SF_Take
: groute::SF_None; // filter
}
return groute::SF_Pass;
}
__device__ __forceinline__ groute::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::SF_Take
: groute::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return DistanceData(work, m_distances_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum
>
struct Problem
{
TGraph m_graph;
TWeightDatum<distance_t> m_weights_datum;
TDistanceDatum<distance_t> m_distances_datum;
public:
Problem(const TGraph& graph, const TWeightDatum<distance_t>& weights_datum, const TDistanceDatum<distance_t>& distances_datum) :
m_graph(graph), m_weights_datum(weights_datum), m_distances_datum(distances_datum)
{
}
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_distances_datum.size);
Marker::MarkWorkitems(m_distances_datum.size, "SSSPInit");
SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_distances_datum.data_ptr, m_distances_datum.size);
}
void Init(groute::Worklist<index_t>& in_wl, groute::Stream& stream) const
{
index_t source_node = min(max(0, FLAGS_source_node), m_graph.nnodes - 1);
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_distances_datum.size);
Marker::MarkWorkitems(m_distances_datum.size, "SSSPInit");
SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_distances_datum.data_ptr, m_distances_datum.size, source_node);
in_wl.AppendItemAsync(stream.cuda_stream, source_node); // add the first item to the worklist
}
template<typename TWorklist, bool WarpAppend = true>
void Relax(const groute::Segment<index_t>& work, TWorklist& output_worklist, groute::Stream& stream) const
{
if (work.Empty()) return;
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work.GetSegmentSize());
if (FLAGS_cta_np)
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NestedParallelism__");
SSSPKernel__NestedParallelism__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
WorkTargetWorklist(output_worklist)
);
}
else
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel");
SSSPKernel << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
WorkTargetWorklist(output_worklist)
);
}
}
template< template <typename> class LocalWorklist, template <typename> class RemoteWorklist>
void Relax__NF__(
const groute::Segment<index_t>& work, int delta,
LocalWorklist<index_t>& near_worklist, LocalWorklist<index_t>& far_worklist,
RemoteWorklist<DistanceData>& remote_worklist, groute::Stream& stream) const
{
if (work.Empty()) return;
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work.GetSegmentSize());
if (FLAGS_cta_np)
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NF__NestedParallelism__");
SSSPKernel__NF__NestedParallelism__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetRemoteWorklist(remote_worklist)
);
}
else
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NF__");
SSSPKernel__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetRemoteWorklist(remote_worklist)
);
}
}
void RelaxSingle__NF__(
const groute::Segment<index_t>& work, int delta,
groute::Worklist<index_t>& near_worklist, groute::Worklist<index_t>& far_worklist,
groute::Stream& stream) const
{
if (work.Empty()) return;
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work.GetSegmentSize());
if (FLAGS_cta_np)
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NF__NestedParallelism__ (single)");
SSSPKernel__NF__NestedParallelism__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetDummy()
);
}
else
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSP-NF Relax (single)");
SSSPKernel__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetDummy()
);
}
}
uint32_t SplitRemoteInput__NF__(
const std::vector< groute::Segment<index_t> >& work_segs, int delta,
groute::Worklist<index_t>& near_worklist, groute::Worklist<index_t>& far_worklist,
groute::Stream& stream) const
{
uint32_t work_size = 0;
dim3 grid_dims, block_dims;
switch (work_segs.size())
{
case 0: break;
case 1:
work_size = work_segs[0].GetSegmentSize();
KernelSizing(grid_dims, block_dims, work_size);
SSSPNearFarSplit__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
delta,
groute::dev::WorkSourceArray<index_t>(
work_segs[0].GetSegmentPtr(), work_segs[0].GetSegmentSize()),
m_distances_datum,
near_worklist.DeviceObject(), far_worklist.DeviceObject()
);
break;
case 2:
work_size = work_segs[0].GetSegmentSize() + work_segs[1].GetSegmentSize();
KernelSizing(grid_dims, block_dims, work_size);
SSSPNearFarSplit__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
delta,
groute::dev::WorkSourceTwoArrays<index_t>( // using a two seg template
work_segs[0].GetSegmentPtr(), work_segs[0].GetSegmentSize(),
work_segs[1].GetSegmentPtr(), work_segs[1].GetSegmentSize()),
m_distances_datum,
near_worklist.DeviceObject(), far_worklist.DeviceObject()
);
break;
default:
printf("\n\nWarning: work_segs has more then two segments, something is wrong\n\n");
assert(false);
}
return work_size;
}
};
template<
typename Algo, typename Problem>
class SSSPSolver__NF__
{
Problem& m_problem;
std::unique_ptr< groute::Worklist<local_work_t> > m_worklist1;
std::unique_ptr< groute::Worklist<local_work_t> > m_worklist2;
public:
SSSPSolver__NF__(groute::graphs::traversal::Context<Algo>& context, Problem& problem) :
m_problem(problem)
{
size_t max_work_size = (context.host_graph.nedges / context.ngpus) * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
m_worklist1 = groute::make_unique< groute::Worklist<local_work_t> >(max_work_size);
m_worklist2 = groute::make_unique< groute::Worklist<local_work_t> >(max_work_size);
}
void Solve(
groute::graphs::traversal::Context<Algo>& context,
groute::device_t dev,
groute::DistributedWorklist<local_work_t, remote_work_t>& distributed_worklist,
groute::IDistributedWorklistPeer<local_work_t, remote_work_t>* worklist_peer,
groute::Stream& stream)
{
m_worklist1->ResetAsync(stream.cuda_stream);
m_worklist2->ResetAsync(stream.cuda_stream);
int current_delta = FLAGS_nf_delta;
auto& remote_input_worklist = worklist_peer->GetLocalInputWorklist();
auto& remote_output_worklist = worklist_peer->GetRemoteOutputWorklist();
groute::Worklist<local_work_t>* input_worklist = &worklist_peer->GetTempWorklist(); // temporary input worklist
groute::Worklist<local_work_t>* near_worklist = m_worklist1.get(); // near output worklist
groute::Worklist<local_work_t>* far_worklist = m_worklist2.get(); // far output worklist
groute::Segment<index_t> input_seg;
while (distributed_worklist.HasWork())
{
int overall_far_work = 0;
while (true)
{
size_t new_work = 0, performed_work = 0;
m_problem.Relax__NF__(
input_seg, current_delta, *near_worklist, *far_worklist, remote_output_worklist, stream);
performed_work += input_seg.GetSegmentSize();
// Merge remote work into the local near-far worklists
auto remote_input_segs
= input_seg.Empty()
? worklist_peer->GetLocalWork(stream) // blocking call
: remote_input_worklist.ToSegs(stream);
int remote_input_work = m_problem.SplitRemoteInput__NF__(
remote_input_segs, current_delta, *near_worklist, *far_worklist, stream);
remote_input_worklist.PopItemsAsync(remote_input_work, stream.cuda_stream);
performed_work += remote_input_work;
// Get state of near-far work
int current_near_work = near_worklist->GetLength(stream);
int current_far_work = far_worklist->GetLength(stream);
new_work += current_near_work;
new_work += (current_far_work - overall_far_work);
new_work += remote_output_worklist.GetAllocCountAndSync(stream); // get the work pushed and sync alloc-end
worklist_peer->SignalRemoteWork(context.RecordEvent(dev, stream.cuda_stream)); // signal
// Report overall work
distributed_worklist.ReportWork(
new_work,
performed_work,
Algo::Name(), dev
);
overall_far_work = current_far_work;
input_worklist->ResetAsync(stream.cuda_stream);
input_seg = groute::Segment<index_t>(near_worklist->GetDataPtr(), current_near_work);
if (input_seg.Empty()) break; // break to the far worklist
std::swap(near_worklist, input_worklist);
}
current_delta += FLAGS_nf_delta;
input_seg = groute::Segment<index_t>(far_worklist->GetDataPtr(), overall_far_work);
std::swap(far_worklist, input_worklist);
}
}
};
struct Algo
{
static const char* NameLower() { return FLAGS_nf ? "sssp-nf" : "sssp"; }
static const char* Name() { return FLAGS_nf ? "SSSP-nf" : "SSSP"; }
static void Init(
groute::graphs::traversal::Context<sssp::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::DistributedWorklist<local_work_t, remote_work_t>& distributed_worklist)
{
index_t source_node = min(max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// report the initial work
distributed_worklist.ReportWork(1);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<
typename TGraphAllocator,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum, typename...UnusedData>
static std::vector<distance_t> Gather(
TGraphAllocator& graph_allocator,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
graph_allocator.GatherDatum(distances_datum);
return distances_datum.GetHostData();
}
template<
template <typename> class TWeightDatum,
template <typename> class TDistanceDatum,
typename...UnusedData>
static std::vector<distance_t> Host(
groute::graphs::host::CSRGraph& graph,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min( max(0, FLAGS_source_node), graph.nnodes-1));
}
static int Output(const char *file, const std::vector<distance_t>& distances)
{
return SSSPOutput(file, distances);
}
static int CheckErrors(const std::vector<distance_t>& distances, const std::vector<distance_t>& regression)
{
return SSSPCheckErrors(distances, regression);
}
};
}
bool TestSSSPAsyncMulti__NF__(int ngpus)
{
typedef sssp::Problem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg, groute::graphs::dev::GraphDatum> Problem;
groute::graphs::traversal::__MultiRunner__ <
sssp::Algo,
Problem,
sssp::SSSPSolver__NF__<sssp::Algo, Problem>, // The NF solver
sssp::SplitOps,
sssp::local_work_t,
sssp::remote_work_t,
groute::graphs::multi::EdgeInputDatum<distance_t>,
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> > runner;
groute::graphs::multi::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> node_distances;
return runner(ngpus, edge_weights, node_distances);
}
bool TestSSSPAsyncMulti(int ngpus)
{
typedef sssp::Problem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg, groute::graphs::dev::GraphDatum> Problem;
groute::graphs::traversal::__MultiRunner__ <
sssp::Algo,
Problem,
groute::graphs::traversal::__GenericMultiSolver__<sssp::Algo, Problem, sssp::local_work_t, sssp::remote_work_t>,
sssp::SplitOps,
sssp::local_work_t,
sssp::remote_work_t,
groute::graphs::multi::EdgeInputDatum<distance_t>,
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> > runner;
groute::graphs::multi::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> node_distances;
return runner(ngpus, edge_weights, node_distances);
}
bool TestSSSPSingle__NF__()
{
typedef sssp::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum> Problem;
groute::graphs::traversal::Context<sssp::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
groute::graphs::single::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::single::NodeOutputDatum<distance_t> node_distances;
dev_graph_allocator.AllocateDatumObjects(edge_weights, node_distances);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
Problem problem(dev_graph_allocator.DeviceObject(), edge_weights.DeviceObject(), node_distances.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
groute::Stream stream;
groute::Worklist<index_t>
wl1(max_work_size),
wl2(max_work_size),
wl3(max_work_size);
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
wl3.ResetAsync(stream.cuda_stream);
stream.Sync();
Stopwatch sw(true);
IntervalRangeMarker algo_rng(context.host_graph.nedges, "SSSP-nf start (hardwired single GPU)");
groute::Worklist<index_t>* input_worklist = &wl1, *near_worklist = &wl2, *far_worklist = &wl3;
problem.Init(*input_worklist, stream);
groute::Segment<index_t> work_seg;
work_seg = input_worklist->ToSeg(stream);
int current_delta = FLAGS_nf_delta;
while (!work_seg.Empty())
{
while (!work_seg.Empty())
{
problem.RelaxSingle__NF__(work_seg, current_delta, *near_worklist, *far_worklist, stream);
input_worklist->ResetAsync(stream.cuda_stream);
work_seg = near_worklist->ToSeg(stream);
std::swap(near_worklist, input_worklist);
}
current_delta += FLAGS_nf_delta;
work_seg = far_worklist->ToSeg(stream);
std::swap(far_worklist, input_worklist);
}
algo_rng.Stop();
sw.stop();
if (FLAGS_repetitions > 1)
printf("\nWarning: ignoring repetitions flag, running just one repetition (not implemented)\n");
printf("\n%s: %f ms. <filter>\n\n", sssp::Algo::Name(), sw.ms() / FLAGS_repetitions);
// Gather
auto gathered_output = sssp::Algo::Gather(dev_graph_allocator, edge_weights, node_distances);
if (FLAGS_output.length() != 0)
sssp::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = sssp::Algo::Host(context.host_graph, edge_weights, node_distances);
return sssp::Algo::CheckErrors(gathered_output, regression) == 0;
}
else {
printf("Warning: Result not checked\n");
return true;
}
}
bool TestSSSPSingle()
{
groute::graphs::traversal::__SingleRunner__ <
sssp::Algo,
sssp::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>,
groute::graphs::single::EdgeInputDatum<distance_t>,
groute::graphs::single::NodeOutputDatum<distance_t> > runner;
groute::graphs::single::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::single::NodeOutputDatum<distance_t> node_distances;
return runner(edge_weights, node_distances);
}
| 8c7f9dca5c58814e5278ae0ec5bf853fab0cdb9c.cu | // Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/distributed_worklist.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include "sssp_common.h"
const distance_t INF = UINT_MAX;
DEFINE_int32(source_node, 0, "The source node for the SSSP traversal (clamped to [0, nnodes-1])");
DECLARE_bool(nf);
DEFINE_int32(nf_delta, 10000, "The delta for SSSP-nf");
#define GTID (blockIdx.x * blockDim.x + threadIdx.x)
namespace sssp
{
struct DistanceData
{
index_t node;
distance_t distance;
__device__ __host__ __forceinline__ DistanceData(index_t node, distance_t distance) : node(node), distance(distance) { }
__device__ __host__ __forceinline__ DistanceData() : node(INF), distance(INF) { }
};
typedef index_t local_work_t;
typedef DistanceData remote_work_t;
struct WorkTargetRemoteWorklist
{
private:
groute::dev::CircularWorklist<remote_work_t> m_worklist;
public:
WorkTargetRemoteWorklist(groute::CircularWorklist<remote_work_t>& worklist) : m_worklist(worklist.DeviceObject()) { }
__device__ __forceinline__ void append_work(const remote_work_t& work)
{
m_worklist.append_warp(work);
}
};
struct WorkTargetDummy
{
public:
WorkTargetDummy() { }
__device__ __forceinline__ void append_work(const remote_work_t& work)
{
}
};
struct WorkTargetRemoteMark
{
private:
groute::graphs::dev::GraphDatum<mark_t> m_remote_marks;
groute::dev::Counter m_remote_counter;
public:
WorkTargetRemoteMark(
groute::graphs::dev::GraphDatum<mark_t> remote_marks,
groute::Counter& remote_counter) :
m_remote_marks(remote_marks), m_remote_counter(remote_counter.DeviceObject())
{
}
__device__ __forceinline__ void append_work(const remote_work_t& work)
{
if (m_remote_marks[work.node] == 0)
{
m_remote_marks[work.node] = 1; // mark
m_remote_counter.add_one_warp();
}
}
};
__global__ void SSSPInit(distance_t* distances, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
distances[tid] = INF;
}
}
__global__ void SSSPInit(distance_t* distances, int nnodes, index_t source)
{
int tid = GTID;
if (tid < nnodes)
{
distances[tid] = tid == source ? 0 : INF;
}
}
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel__NestedParallelism__(
TGraph graph,
TWeightDatum edge_weights, TDistanceDatum node_distances,
WorkSource work_source, WorkTarget work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<distance_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = node_distances.get_item(node);
}
groute::dev::CTAWorkScheduler<distance_t>::template schedule(
np_local,
[&graph, &edge_weights, &node_distances, &work_target](index_t edge, distance_t distance)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
work_target.append_work(graph, dest);
}
}
);
}
}
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel(
TGraph graph,
TWeightDatum edge_weights, TDistanceDatum node_distances,
WorkSource work_source, WorkTarget work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
distance_t distance = node_distances.get_item(node);
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
work_target.append_work(graph, dest);
}
}
}
}
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel__NF__NestedParallelism__(
TGraph graph,
TWeightDatum<distance_t> edge_weights, TDistanceDatum<distance_t> node_distances,
int delta,
WorkSource work_source,
groute::dev::Worklist<index_t> near_worklist, groute::dev::Worklist<index_t> far_worklist,
WorkTarget remote_work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<distance_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = node_distances.get_item(node);
}
groute::dev::CTAWorkScheduler<distance_t>::template schedule(
np_local,
[&graph, &edge_weights, &node_distances, &near_worklist, &far_worklist, &remote_work_target, delta](index_t edge, distance_t distance)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
if (graph.owns(dest))
{
int near_mask = __ballot(distance + weight <= delta ? 1 : 0);
int far_mask = __ballot(distance + weight <= delta ? 0 : 1);
if (distance + weight <= delta)
{
int near_leader = __ffs(near_mask) - 1;
int thread_offset = __popc(near_mask & ((1 << lane_id()) - 1));
near_worklist.append_warp(dest, near_leader, __popc(near_mask), thread_offset);
}
else
{
int far_leader = __ffs(far_mask) - 1;
int thread_offset = __popc(far_mask & ((1 << lane_id()) - 1));
far_worklist.append_warp(dest, far_leader, __popc(far_mask), thread_offset);
}
}
else
{
remote_work_target.append_work(DistanceData(dest, distance + weight));
}
}
}
);
}
}
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum,
typename WorkSource, typename WorkTarget>
__global__ void SSSPKernel__NF__(
TGraph graph,
TWeightDatum<distance_t> edge_weights, TDistanceDatum<distance_t> node_distances,
int delta,
WorkSource work_source,
groute::dev::Worklist<index_t> near_worklist, groute::dev::Worklist<index_t> far_worklist,
WorkTarget remote_work_target)
{
int tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
distance_t distance = node_distances.get_item(node);
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
if (graph.owns(dest))
{
int near_mask = __ballot(distance + weight <= delta ? 1 : 0);
int far_mask = __ballot(distance + weight <= delta ? 0 : 1);
if (distance + weight <= delta)
{
int near_leader = __ffs(near_mask) - 1;
int thread_offset = __popc(near_mask & ((1 << lane_id()) - 1));
near_worklist.append_warp(dest, near_leader, __popc(near_mask), thread_offset);
}
else
{
int far_leader = __ffs(far_mask) - 1;
int thread_offset = __popc(far_mask & ((1 << lane_id()) - 1));
far_worklist.append_warp(dest, far_leader, __popc(far_mask), thread_offset);
}
}
else
{
remote_work_target.append_work(DistanceData(dest, distance + weight));
}
}
}
}
}
template<typename WorkSource>
__global__ void SSSPNearFarSplit__NF__(
int delta,
WorkSource work_source,
groute::graphs::dev::GraphDatum<distance_t> node_distances,
groute::dev::Worklist<index_t> near_worklist, groute::dev::Worklist<index_t> far_worklist)
{
int tid = GTID;
uint32_t work_size = work_source.get_size();
if (tid < work_size)
{
index_t node = work_source.get_work(tid);
distance_t distance = node_distances.get_item(node);
// warp-aggregated append: __ballot builds a mask of the lanes taking each branch,
// __ffs picks the lowest participating lane as the leader that allocates space for
// the whole warp, and __popc over the lower lanes gives each thread its slot offset
int near_mask = __ballot(distance <= delta ? 1 : 0);
int far_mask = __ballot(distance <= delta ? 0 : 1);
if (distance <= delta)
{
int near_leader = __ffs(near_mask) - 1;
int thread_offset = __popc(near_mask & ((1 << lane_id()) - 1));
near_worklist.append_warp(node, near_leader, __popc(near_mask), thread_offset);
}
else
{
int far_leader = __ffs(far_mask) - 1;
int thread_offset = __popc(far_mask & ((1 << lane_id()) - 1));
far_worklist.append_warp(node, far_leader, __popc(far_mask), thread_offset);
}
}
}
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<distance_t> m_distances_datum;
public:
template<typename...UnusedData>
SplitOps(
const groute::graphs::dev::CSRGraphSeg& graph_seg,
const groute::graphs::dev::GraphDatumSeg<distance_t>& weights_datum,
const groute::graphs::dev::GraphDatum<distance_t>& distances_datum,
UnusedData&... data)
: m_graph_seg(graph_seg), m_distances_datum(distances_datum)
{
}
__device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.distance < atomicMin(m_distances_datum.get_item_ptr(work.node), work.distance))
? groute::SF_Take
: groute::SF_None; // filter
}
return groute::SF_Pass;
}
__device__ __forceinline__ groute::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::SF_Take
: groute::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return DistanceData(work, m_distances_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum
>
struct Problem
{
TGraph m_graph;
TWeightDatum<distance_t> m_weights_datum;
TDistanceDatum<distance_t> m_distances_datum;
public:
Problem(const TGraph& graph, const TWeightDatum<distance_t>& weights_datum, const TDistanceDatum<distance_t>& distances_datum) :
m_graph(graph), m_weights_datum(weights_datum), m_distances_datum(distances_datum)
{
}
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_distances_datum.size);
Marker::MarkWorkitems(m_distances_datum.size, "SSSPInit");
SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_distances_datum.data_ptr, m_distances_datum.size);
}
void Init(groute::Worklist<index_t>& in_wl, groute::Stream& stream) const
{
index_t source_node = min(max(0, FLAGS_source_node), m_graph.nnodes - 1);
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_distances_datum.size);
Marker::MarkWorkitems(m_distances_datum.size, "SSSPInit");
SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_distances_datum.data_ptr, m_distances_datum.size, source_node);
in_wl.AppendItemAsync(stream.cuda_stream, source_node); // add the first item to the worklist
}
template<typename TWorklist, bool WarpAppend = true>
void Relax(const groute::Segment<index_t>& work, TWorklist& output_worklist, groute::Stream& stream) const
{
if (work.Empty()) return;
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work.GetSegmentSize());
if (FLAGS_cta_np)
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NestedParallelism__");
SSSPKernel__NestedParallelism__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
WorkTargetWorklist(output_worklist)
);
}
else
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel");
SSSPKernel << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
WorkTargetWorklist(output_worklist)
);
}
}
template< template <typename> class LocalWorklist, template <typename> class RemoteWorklist>
void Relax__NF__(
const groute::Segment<index_t>& work, int delta,
LocalWorklist<index_t>& near_worklist, LocalWorklist<index_t>& far_worklist,
RemoteWorklist<DistanceData>& remote_worklist, groute::Stream& stream) const
{
if (work.Empty()) return;
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work.GetSegmentSize());
if (FLAGS_cta_np)
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NF__NestedParallelism__");
SSSPKernel__NF__NestedParallelism__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetRemoteWorklist(remote_worklist)
);
}
else
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NF__");
SSSPKernel__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetRemoteWorklist(remote_worklist)
);
}
}
void RelaxSingle__NF__(
const groute::Segment<index_t>& work, int delta,
groute::Worklist<index_t>& near_worklist, groute::Worklist<index_t>& far_worklist,
groute::Stream& stream) const
{
if (work.Empty()) return;
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work.GetSegmentSize());
if (FLAGS_cta_np)
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSPKernel__NF__NestedParallelism__ (single)");
SSSPKernel__NF__NestedParallelism__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetDummy()
);
}
else
{
Marker::MarkWorkitems(work.GetSegmentSize(), "SSSP-NF Relax (single)");
SSSPKernel__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_graph, m_weights_datum, m_distances_datum, delta,
groute::dev::WorkSourceArray<index_t>(work.GetSegmentPtr(), work.GetSegmentSize()),
near_worklist.DeviceObject(), far_worklist.DeviceObject(),
WorkTargetDummy()
);
}
}
uint32_t SplitRemoteInput__NF__(
const std::vector< groute::Segment<index_t> >& work_segs, int delta,
groute::Worklist<index_t>& near_worklist, groute::Worklist<index_t>& far_worklist,
groute::Stream& stream) const
{
uint32_t work_size = 0;
dim3 grid_dims, block_dims;
switch (work_segs.size())
{
case 0: break;
case 1:
work_size = work_segs[0].GetSegmentSize();
KernelSizing(grid_dims, block_dims, work_size);
SSSPNearFarSplit__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
delta,
groute::dev::WorkSourceArray<index_t>(
work_segs[0].GetSegmentPtr(), work_segs[0].GetSegmentSize()),
m_distances_datum,
near_worklist.DeviceObject(), far_worklist.DeviceObject()
);
break;
case 2:
work_size = work_segs[0].GetSegmentSize() + work_segs[1].GetSegmentSize();
KernelSizing(grid_dims, block_dims, work_size);
SSSPNearFarSplit__NF__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
delta,
groute::dev::WorkSourceTwoArrays<index_t>( // using a two seg template
work_segs[0].GetSegmentPtr(), work_segs[0].GetSegmentSize(),
work_segs[1].GetSegmentPtr(), work_segs[1].GetSegmentSize()),
m_distances_datum,
near_worklist.DeviceObject(), far_worklist.DeviceObject()
);
break;
default:
printf("\n\nWarning: work_segs has more then two segments, something is wrong\n\n");
assert(false);
}
return work_size;
}
};
template<
typename Algo, typename Problem>
class SSSPSolver__NF__
{
Problem& m_problem;
std::unique_ptr< groute::Worklist<local_work_t> > m_worklist1;
std::unique_ptr< groute::Worklist<local_work_t> > m_worklist2;
public:
SSSPSolver__NF__(groute::graphs::traversal::Context<Algo>& context, Problem& problem) :
m_problem(problem)
{
size_t max_work_size = (context.host_graph.nedges / context.ngpus) * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
m_worklist1 = groute::make_unique< groute::Worklist<local_work_t> >(max_work_size);
m_worklist2 = groute::make_unique< groute::Worklist<local_work_t> >(max_work_size);
}
void Solve(
groute::graphs::traversal::Context<Algo>& context,
groute::device_t dev,
groute::DistributedWorklist<local_work_t, remote_work_t>& distributed_worklist,
groute::IDistributedWorklistPeer<local_work_t, remote_work_t>* worklist_peer,
groute::Stream& stream)
{
m_worklist1->ResetAsync(stream.cuda_stream);
m_worklist2->ResetAsync(stream.cuda_stream);
int current_delta = FLAGS_nf_delta;
auto& remote_input_worklist = worklist_peer->GetLocalInputWorklist();
auto& remote_output_worklist = worklist_peer->GetRemoteOutputWorklist();
groute::Worklist<local_work_t>* input_worklist = &worklist_peer->GetTempWorklist(); // temporary input worklist
groute::Worklist<local_work_t>* near_worklist = m_worklist1.get(); // near output worklist
groute::Worklist<local_work_t>* far_worklist = m_worklist2.get(); // far output worklist
groute::Segment<index_t> input_seg;
while (distributed_worklist.HasWork())
{
int overall_far_work = 0;
while (true)
{
size_t new_work = 0, performed_work = 0;
m_problem.Relax__NF__(
input_seg, current_delta, *near_worklist, *far_worklist, remote_output_worklist, stream);
performed_work += input_seg.GetSegmentSize();
// Merge remote work into the local near-far worklists
auto remote_input_segs
= input_seg.Empty()
? worklist_peer->GetLocalWork(stream) // blocking call
: remote_input_worklist.ToSegs(stream);
int remote_input_work = m_problem.SplitRemoteInput__NF__(
remote_input_segs, current_delta, *near_worklist, *far_worklist, stream);
remote_input_worklist.PopItemsAsync(remote_input_work, stream.cuda_stream);
performed_work += remote_input_work;
// Get state of near-far work
int current_near_work = near_worklist->GetLength(stream);
int current_far_work = far_worklist->GetLength(stream);
new_work += current_near_work;
new_work += (current_far_work - overall_far_work);
new_work += remote_output_worklist.GetAllocCountAndSync(stream); // get the work pushed and sync alloc-end
worklist_peer->SignalRemoteWork(context.RecordEvent(dev, stream.cuda_stream)); // signal
// Report overall work
distributed_worklist.ReportWork(
new_work,
performed_work,
Algo::Name(), dev
);
overall_far_work = current_far_work;
input_worklist->ResetAsync(stream.cuda_stream);
input_seg = groute::Segment<index_t>(near_worklist->GetDataPtr(), current_near_work);
if (input_seg.Empty()) break; // break to the far worklist
std::swap(near_worklist, input_worklist);
}
current_delta += FLAGS_nf_delta;
input_seg = groute::Segment<index_t>(far_worklist->GetDataPtr(), overall_far_work);
std::swap(far_worklist, input_worklist);
}
}
};
struct Algo
{
static const char* NameLower() { return FLAGS_nf ? "sssp-nf" : "sssp"; }
static const char* Name() { return FLAGS_nf ? "SSSP-nf" : "SSSP"; }
static void Init(
groute::graphs::traversal::Context<sssp::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::DistributedWorklist<local_work_t, remote_work_t>& distributed_worklist)
{
index_t source_node = min(max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// report the initial work
distributed_worklist.ReportWork(1);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<
typename TGraphAllocator,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum, typename...UnusedData>
static std::vector<distance_t> Gather(
TGraphAllocator& graph_allocator,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
graph_allocator.GatherDatum(distances_datum);
return distances_datum.GetHostData();
}
template<
template <typename> class TWeightDatum,
template <typename> class TDistanceDatum,
typename...UnusedData>
static std::vector<distance_t> Host(
groute::graphs::host::CSRGraph& graph,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min( max(0, FLAGS_source_node), graph.nnodes-1));
}
static int Output(const char *file, const std::vector<distance_t>& distances)
{
return SSSPOutput(file, distances);
}
static int CheckErrors(const std::vector<distance_t>& distances, const std::vector<distance_t>& regression)
{
return SSSPCheckErrors(distances, regression);
}
};
}
bool TestSSSPAsyncMulti__NF__(int ngpus)
{
typedef sssp::Problem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg, groute::graphs::dev::GraphDatum> Problem;
groute::graphs::traversal::__MultiRunner__ <
sssp::Algo,
Problem,
sssp::SSSPSolver__NF__<sssp::Algo, Problem>, // The NF solver
sssp::SplitOps,
sssp::local_work_t,
sssp::remote_work_t,
groute::graphs::multi::EdgeInputDatum<distance_t>,
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> > runner;
groute::graphs::multi::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> node_distances;
return runner(ngpus, edge_weights, node_distances);
}
bool TestSSSPAsyncMulti(int ngpus)
{
typedef sssp::Problem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg, groute::graphs::dev::GraphDatum> Problem;
groute::graphs::traversal::__MultiRunner__ <
sssp::Algo,
Problem,
groute::graphs::traversal::__GenericMultiSolver__<sssp::Algo, Problem, sssp::local_work_t, sssp::remote_work_t>,
sssp::SplitOps,
sssp::local_work_t,
sssp::remote_work_t,
groute::graphs::multi::EdgeInputDatum<distance_t>,
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> > runner;
groute::graphs::multi::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> node_distances;
return runner(ngpus, edge_weights, node_distances);
}
bool TestSSSPSingle__NF__()
{
typedef sssp::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum> Problem;
groute::graphs::traversal::Context<sssp::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
groute::graphs::single::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::single::NodeOutputDatum<distance_t> node_distances;
dev_graph_allocator.AllocateDatumObjects(edge_weights, node_distances);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
Problem problem(dev_graph_allocator.DeviceObject(), edge_weights.DeviceObject(), node_distances.DeviceObject());
size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
if (FLAGS_wl_alloc_abs > 0)
max_work_size = FLAGS_wl_alloc_abs;
groute::Stream stream;
groute::Worklist<index_t>
wl1(max_work_size),
wl2(max_work_size),
wl3(max_work_size);
wl1.ResetAsync(stream.cuda_stream);
wl2.ResetAsync(stream.cuda_stream);
wl3.ResetAsync(stream.cuda_stream);
stream.Sync();
Stopwatch sw(true);
IntervalRangeMarker algo_rng(context.host_graph.nedges, "SSSP-nf start (hardwired single GPU)");
groute::Worklist<index_t>* input_worklist = &wl1, *near_worklist = &wl2, *far_worklist = &wl3;
problem.Init(*input_worklist, stream);
groute::Segment<index_t> work_seg;
work_seg = input_worklist->ToSeg(stream);
int current_delta = FLAGS_nf_delta;
while (!work_seg.Empty())
{
while (!work_seg.Empty())
{
problem.RelaxSingle__NF__(work_seg, current_delta, *near_worklist, *far_worklist, stream);
input_worklist->ResetAsync(stream.cuda_stream);
work_seg = near_worklist->ToSeg(stream);
std::swap(near_worklist, input_worklist);
}
current_delta += FLAGS_nf_delta;
work_seg = far_worklist->ToSeg(stream);
std::swap(far_worklist, input_worklist);
}
algo_rng.Stop();
sw.stop();
if (FLAGS_repetitions > 1)
printf("\nWarning: ignoring repetitions flag, running just one repetition (not implemented)\n");
printf("\n%s: %f ms. <filter>\n\n", sssp::Algo::Name(), sw.ms() / FLAGS_repetitions);
// Gather
auto gathered_output = sssp::Algo::Gather(dev_graph_allocator, edge_weights, node_distances);
if (FLAGS_output.length() != 0)
sssp::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = sssp::Algo::Host(context.host_graph, edge_weights, node_distances);
return sssp::Algo::CheckErrors(gathered_output, regression) == 0;
}
else {
printf("Warning: Result not checked\n");
return true;
}
}
bool TestSSSPSingle()
{
groute::graphs::traversal::__SingleRunner__ <
sssp::Algo,
sssp::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>,
groute::graphs::single::EdgeInputDatum<distance_t>,
groute::graphs::single::NodeOutputDatum<distance_t> > runner;
groute::graphs::single::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::single::NodeOutputDatum<distance_t> node_distances;
return runner(edge_weights, node_distances);
}
|
bd118240da523255392eb452e20644a78b6650b0.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <array>
#include <regex>
#include <hip/hip_runtime.h>
#include <assert.h>
#include "../../GpuMatrix.hpp"
#include "generalInformation.hpp"
// #include <helper_cuda.h>
// #include <helper_functions.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define assertm(exp, msg) assert(((void)msg, exp))
// typedef struct {
// int warpsSize;
// int maxThreadsPerBlock;
// size_t totalGlobalMem;
// int maxThreadsDim[3];
// int maxGridSize[3];
// int maxThreadsPerMultiProcessor;
// } KernelConfiguration;
hipDeviceProp_t deviceProps;
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
std::cerr << "Error during configuration : " << hipGetErrorName(code) << "\nDefinition error : " << hipGetErrorString(code) << std::endl;
if (abort) { exit(code); }
}
}
std::string exec(const char* cmd) {
std::array<char, 128> buffer;
std::string result;
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(cmd, "r"), pclose);
assertm(pipe, "popen() failed");
while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
result += buffer.data();
}
return result;
}
void sofwareVerification(void){
std::cout << "Currently processing to the device verification and configuration for the GPU operations " << std::endl;
// #if !defined(nvcc)
// #error "nvcc is not defined as an environnement variable, please do it "
// #endif
const char* cmd = "nvcc --version";
std::string result = exec(cmd);
std::regex getVersion("\\sV(.*)");
std::smatch version;
// std::cout << version[1].str() << std::endl;
std::regex_search(result, version, getVersion);
float numericVersion{};
try {
numericVersion = std::stof(version[1].str());
}
catch (std::out_of_range& e){
std::cerr << "Error regarding the access to cuda software please make sure nvcc is defined as environnement variable" << std::endl;
exit(0);
}
if (numericVersion <= 8) {
std::cerr << "Your cuda version is obsolete please upgrade it to at least version 8.0" << std::endl;
exit(0);
}
std::cout << "Cuda sotware identified ++++++++++++++++++\n" << result << std::endl;
}
void matrixGPU_init(bool verbose = false){
if (!(verbose)){
std::cout.setstate(std::ios_base::failbit);
}
sofwareVerification();
int* device = new int;
gpuErrchk(hipGetDeviceCount(device));
if (*device > 1) {
std::cout << "Warning this library only uses one GPU, wait for an update..." << std::endl;
}
gpuErrchk(hipGetDevice(device));
gpuErrchk(hipGetDeviceProperties(&deviceProps, *device));
std::cout << "CUDA device " << deviceProps.name << std::endl;
delete device;
if (!(verbose)){
std::cout.clear();
}
}
template <typename T>
void GpuMatrix<T>::matrixGPU_print(unsigned int rows, unsigned int columns, bool isTop, bool isLeft){
assertm((this->ROWS >= rows && this->COLUMNS >= columns), "error : rows and columns in arguments are higher than matrix size ");
std::cout << "Matrix dimensions are [" << this->ROWS << "," << this->COLUMNS << "] - displaying a "<< rows << "x" << columns << " insight //" << std::endl;
switch (isTop){
case true:
if (isLeft) {
// in this case we'll print the rows first rows and the columns first columns
for (unsigned int i = 0; i<rows; i++){
for (unsigned int j = 0; j<columns; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
else {
// in this case we'll print the rows first rows and the columns last columns
for (unsigned int i = 0; i<rows; i++){
for (unsigned int j = (this->COLUMNS-columns); j<this->COLUMNS; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
break;
case false:
if (isLeft){
for (unsigned int i = this->ROWS-rows; i<this->ROWS; i++){
for (unsigned int j = 0; j<columns; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
else {
for (unsigned int i = this->ROWS-rows; i<this->ROWS; i++){
for (unsigned int j = (this->COLUMNS-columns); j<this->COLUMNS; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
break;
}
printf("\n\n");
}
| bd118240da523255392eb452e20644a78b6650b0.cu | #include <cstdio>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <array>
#include <regex>
#include <cuda_runtime.h>
#include <assert.h>
#include "../../GpuMatrix.hpp"
#include "generalInformation.hpp"
// #include <helper_cuda.h>
// #include <helper_functions.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define assertm(exp, msg) assert(((void)msg, exp))
// typedef struct {
// int warpsSize;
// int maxThreadsPerBlock;
// size_t totalGlobalMem;
// int maxThreadsDim[3];
// int maxGridSize[3];
// int maxThreadsPerMultiProcessor;
// } KernelConfiguration;
cudaDeviceProp deviceProps;
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
std::cerr << "Error during configuration : " << cudaGetErrorName(code) << "\nDefinition error : " << cudaGetErrorString(code) << std::endl;
if (abort) { exit(code); }
}
}
std::string exec(const char* cmd) {
std::array<char, 128> buffer;
std::string result;
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(cmd, "r"), pclose);
assertm(pipe, "popen() failed");
while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
result += buffer.data();
}
return result;
}
void sofwareVerification(void){
std::cout << "Currently processing to the device verification and configuration for the GPU operations " << std::endl;
// #if !defined(nvcc)
// #error "nvcc is not defined as an environnement variable, please do it "
// #endif
const char* cmd = "nvcc --version";
std::string result = exec(cmd);
std::regex getVersion("\\sV(.*)");
std::smatch version;
// std::cout << version[1].str() << std::endl;
std::regex_search(result, version, getVersion);
float numericVersion{};
try {
numericVersion = std::stof(version[1].str());
}
catch (std::out_of_range& e){
std::cerr << "Error regarding the access to cuda software please make sure nvcc is defined as environnement variable" << std::endl;
exit(0);
}
if (numericVersion <= 8) {
std::cerr << "Your cuda version is obsolete please upgrade it to at least version 8.0" << std::endl;
exit(0);
}
std::cout << "Cuda sotware identified ++++++++++++++++++\n" << result << std::endl;
}
void matrixGPU_init(bool verbose = false){
if (!(verbose)){
std::cout.setstate(std::ios_base::failbit);
}
sofwareVerification();
int* device = new int;
gpuErrchk(cudaGetDeviceCount(device));
if (*device > 1) {
std::cout << "Warning this library only uses one GPU, wait for an update..." << std::endl;
}
gpuErrchk(cudaGetDevice(device));
gpuErrchk(cudaGetDeviceProperties(&deviceProps, *device));
std::cout << "CUDA device " << deviceProps.name << std::endl;
delete device;
if (!(verbose)){
std::cout.clear();
}
}
template <typename T>
void GpuMatrix<T>::matrixGPU_print(unsigned int rows, unsigned int columns, bool isTop, bool isLeft){
assertm((this->ROWS >= rows && this->COLUMNS >= columns), "error : rows and columns in arguments are higher than matrix size ");
std::cout << "Matrix dimensions are [" << this->ROWS << "," << this->COLUMNS << "] - displaying a "<< rows << "x" << columns << " insight //" << std::endl;
switch (isTop){
case true:
if (isLeft) {
// in this case we'll print the rows first rows and the columns first columns
for (unsigned int i = 0; i<rows; i++){
for (unsigned int j = 0; j<columns; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
else {
// in this case we'll print the rows first rows and the columns last columns
for (unsigned int i = 1; i<rows; i++){
for (unsigned int j = (this->COLUMNS-columns); j<this->COLUMNS; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
break;
case false:
if (isLeft){
for (unsigned int i = this->ROWS-rows; i<this->ROWS; i++){
for (unsigned int j = 0; j<columns; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
else {
for (unsigned int i = this->ROWS-rows; i<this->ROWS; i++){
for (unsigned int j = (this->COLUMNS-columns); j<this->COLUMNS; j++){
std::cout.width(5); std::cout << this->data[i*this->COLUMNS+j] << " " << std::flush;
}
std::cout << std::endl;
}
}
break;
}
printf("\n\n");
}
|
ea3d8a59ebe1901bdb535dffc95e0c4445d61e93.hip | // !!! This is a file automatically generated by hipify!!!
//fail
//--blockDim=32 --gridDim=64 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2//32
__device__ void f(float *odata, int* ai) {
int thid = threadIdx.x;
*ai = thid;
odata[*ai] = 2*threadIdx.x;
}
__global__ void k(float *g_odata) {
int ai;
f(g_odata,&ai);
}
| ea3d8a59ebe1901bdb535dffc95e0c4445d61e93.cu | //fail
//--blockDim=32 --gridDim=64 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//32
__device__ void f(float *odata, int* ai) {
int thid = threadIdx.x;
*ai = thid;
odata[*ai] = 2*threadIdx.x;
}
__global__ void k(float *g_odata) {
int ai;
f(g_odata,&ai);
}
|
36e8ce136bfa499c6da007f9e6ab3d444464b671.hip | // !!! This is a file automatically generated by hipify!!!
//Author: Alexander G. Schwing (http://alexander-schwing.de)
#ifdef _MSC_VER
#pragma warning( disable : 4661 )
#endif
#include "Function_ConvSub.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "../LSDN_CudaCommon.h"
#include "LSDN_mathfunctions.h"
template <typename T>
__global__ void kernel_AccumPatchDiff2ImSubsample(const int num, T* img, int numRow, int numCol, int numChan,
int kNumRow, int kNumCol, int stride, int pad, const T* patchMatrixDiff, int h_out, int w_out, int subsample_h, int subsample_w) {
CUDA_KERNEL_LOOP(index, num) {//loops over channels*numImageRows*numImageCols
T res = 0;
//coordinates in padded grid
int h = index % numRow + pad;// +(kNumRow - 1)*(subsample_h / 2) - (subsample_h > 1)*pad;
int w = (index / numRow) % numCol + pad;// +(kNumCol - 1)*(subsample_w / 2) - (subsample_w > 1)*pad;
int c = index / (numRow * numCol);
//where did pixel (index%numRow, (index/numRow)%numCol) have an influence in the patchmatrix?
//the ranges in patchMatrixDiff that affect the current position (h,w) in img
int h_out_start = (h < kNumRow*subsample_h) ? h%subsample_h : (h - kNumRow*subsample_h) / stride + subsample_h;
int h_out_end = min(h / stride + 1, h_out);
int w_out_start = (w < kNumCol*subsample_w) ? w%subsample_w : (w - kNumCol*subsample_w) / stride + subsample_w;
int w_out_end = min(w / stride + 1, w_out);
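// Hedged reading of the bounds above: an output position h_c samples input row
// h_c*stride + r*subsample_h for r in [0, kNumRow), so only h_c values for which
// h - h_c*stride lands on such an offset can contribute; h_out_start/h_out_end (and
// the analogous w_* bounds) restrict the subsample-strided loops below to that
// candidate window.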
for (int w_c = w_out_start; w_c < w_out_end; w_c+=subsample_w) {
for (int h_c = h_out_start; h_c < h_out_end; h_c+=subsample_h) {//height offset for grid in padded dimensions
int c_out = c * kNumRow * kNumCol + ((w - w_c * stride)/subsample_w) * kNumRow + (h - h_c * stride)/subsample_h;
res += patchMatrixDiff[(c_out * w_out + w_c) * h_out + h_c];
}
}
//printf("%f ", patchMatrixDiff[0]);
/*//equivalent implementation, fewer multiplications within for loops
int offset = (c * kNumRow * kNumCol + w * kNumRow + h) * w_out * h_out;
int coeff_h_out = (1 - stride * w_out * h_out);
int coeff_w_out = (1 - stride * kNumRow * w_out) * h_out;
for (int w_c = w_out_start; w_c < w_out_end; ++w_c) {
for (int h_c = h_out_start; h_c < h_out_end; ++h_c) {
res += patchMatrixDiff[offset + w_c*coeff_w_out + h_c*coeff_h_out];
}
}
*/
img[index] = res;
}
}
template <class N>
void ConvSubFunction<N>::AccumPatchDiff2Im(i2t<true>, ValueType* img, SizeType numRow, SizeType numCol, SizeType numChan, SizeType kNumRow, SizeType kNumCol) {
//accumulated the gradients of extracted patches back to img
//SizeType h_out = (numRow + 2 * padSize_ - kNumRow*SubsampleH_ + ((SubsampleH_>1) ? 1 : 0)) / stride_ + 1;
//SizeType w_out = (numCol + 2 * padSize_ - kNumCol*SubsampleW_ + ((SubsampleW_>1) ? 1 : 0)) / stride_ + 1;
SizeType h_out = (numRow + 2 * padSize_ - ((kNumRow - 1)*SubsampleH_ + 1)) / stride_ + 1;
SizeType w_out = (numCol + 2 * padSize_ - ((kNumCol - 1)*SubsampleW_ + 1)) / stride_ + 1;
SizeType num_kernels = numChan * numRow * numCol;
// To avoid involving atomic operations, we launch one kernel per
// input dimension, and then in the kernel add up the output dimensions.
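// In other words (hedged): rather than having every patch element scatter its diff into
// img with atomicAdd, each img element gathers -- it loops over the (h_c, w_c) output
// positions that could have sampled it and sums their entries from patchMatrixDiff_,
// so writes to img are race-free.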
kernel_AccumPatchDiff2ImSubsample<ValueType> << <LSDN_GET_BLOCKS(num_kernels), LSDN_CUDA_NUM_THREADS >> >(int(num_kernels), img,
int(numRow), int(numCol), int(numChan), int(kNumRow), int(kNumCol), int(stride_), int(padSize_),
patchMatrixDiff_, int(h_out), int(w_out), int(SubsampleH_), int(SubsampleW_));
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_Im2PatchesSubsample(const int num, const T* img, int numRow, int numCol,
int kNumRow, int kNumCol, int stride, int pad, T* patchMatrix, int h_out, int w_out, int subsample_h, int subsample_w) {
CUDA_KERNEL_LOOP(index, num) {//loops over channels*h_out*w_out
int h_d = index % h_out;
index /= h_out;
int w_d = index % w_out;//width-destination
int c_s = index / w_out;//channel-source
int c_d = c_s * kNumRow * kNumCol;//channel-destination offset
int h_s = h_d * stride - pad;// -(kNumRow - 1)*(subsample_h / 2) + (subsample_h > 1)*pad;
int w_s = w_d * stride - pad;// -(kNumCol - 1)*(subsample_w / 2) + (subsample_w > 1)*pad;
patchMatrix += (c_d * w_out + w_d) * h_out + h_d;
img += (c_s * numCol + w_s) * numRow + h_s;
for (int c = 0; c < kNumCol; ++c) {
for (int r = 0; r < kNumRow; ++r) {
int h = h_s + r*subsample_h;
int w = w_s + c*subsample_w;
*patchMatrix = (h >= 0 && h<numRow && w >= 0 && w<numCol) ? img[c*subsample_w*numRow + r*subsample_h] : 0;
patchMatrix += w_out * h_out;
}
}
}
}
template <class N>
void ConvSubFunction<N>::Im2Patches(i2t<true>, ValueType* img, SizeType numRow, SizeType numCol, SizeType numChan, SizeType kNumRow, SizeType kNumCol) {
//extract patches of size kNumRow-kNumCol-numChannel from img, and concatenate them into a matrix
//each row stores the column vectorized of each patch
//each kernel copies a single-channel grid (i.e., a kNumRow-kNumCol region)
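// Illustrative example (hypothetical sizes, not taken from any call site): for a 4x4
// single-channel img, a 2x2 kernel, stride 2, no padding and no subsampling, h_out =
// w_out = 2, so patchMatrix_ holds (kNumRow*kNumCol) x (h_out*w_out) = 4 x 4 values --
// one kNumRow*kNumCol patch per output position (w_d, h_d), anchored at (h_d*2, w_d*2).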
//SizeType h_out = (numRow + 2 * padSize_ - kNumRow*SubsampleH_ + ((SubsampleH_>1)?1:0)) / stride_ + 1;
//SizeType w_out = (numCol + 2 * padSize_ - kNumCol*SubsampleW_ + ((SubsampleW_>1)?1:0)) / stride_ + 1;
SizeType h_out = (numRow + 2 * padSize_ - ((kNumRow - 1)*SubsampleH_ + 1)) / stride_ + 1;
SizeType w_out = (numCol + 2 * padSize_ - ((kNumCol - 1)*SubsampleW_ + 1)) / stride_ + 1;
SizeType num_kernels = numChan * h_out * w_out;
check_cuda_errors(__FILE__, __LINE__);
kernel_Im2PatchesSubsample<ValueType> << <LSDN_GET_BLOCKS(num_kernels), LSDN_CUDA_NUM_THREADS >> >(int(num_kernels), img,
int(numRow), int(numCol), int(kNumRow), int(kNumCol), int(stride_), int(padSize_),
patchMatrix_, int(h_out), int(w_out), int(SubsampleH_), int(SubsampleW_));
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_AdditionModuloOperand(T* res, int numEl, const T* addend, int op_division, int op_modulo) {
CUDA_KERNEL_LOOP(index, numEl) {
int ix = (index / op_division) % op_modulo;
res[index] += addend[ix];
}
}
template <class N>
void ConvSubFunction<N>::AdditionModuloOperand(i2t<true>, ValueType* res, SizeType numEl, ValueType* addend, SizeType op_division, SizeType op_modulo) {
kernel_AdditionModuloOperand<ValueType> << <LSDN_GET_BLOCKS(numEl), LSDN_CUDA_NUM_THREADS >> >(res, numEl, addend, op_division, op_modulo);
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_BiasDerivativeSingleDim(T* res, const T* input, const int patchSize, const int numSamples, const int numChannels, const int sampleSize, bool performAddition) {
if (performAddition) {
CUDA_KERNEL_LOOP(index, numChannels) {
const T* ptr = input + patchSize*index;
for (int k = 0; k < patchSize*numSamples; ++k) {
int sample = k / patchSize;
int offset = k % patchSize;
res[index] += ptr[sampleSize*sample + offset];
}
}
} else {
CUDA_KERNEL_LOOP(index, numChannels) {
const T* ptr = input + patchSize*index;
res[index] = T(0);
for (int k = 0; k < patchSize*numSamples; ++k) {
int sample = k / patchSize;
int offset = k % patchSize;
res[index] += ptr[sampleSize*sample + offset];
}
}
}
}
template <class N>
void ConvSubFunction<N>::BiasDerivativeSingleDim(i2t<true>, ValueType* res, ValueType* input, SizeType patchSize, SizeType numSamples, SizeType numChannels, bool performAddition) {
kernel_BiasDerivativeSingleDim<ValueType> << <LSDN_GET_BLOCKS(numChannels), LSDN_CUDA_NUM_THREADS >> >(res, input, patchSize, numSamples, numChannels, patchSize*numChannels, performAddition);
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_BiasDerivativeMultiDim(T* res, const T* input, const int sampleSize, const int numSamples, bool performAddition) {
if (performAddition) {
CUDA_KERNEL_LOOP(index, sampleSize) {
for (int k = 0; k < numSamples; ++k) {
res[index] += input[k*sampleSize + index];
}
}
} else {
CUDA_KERNEL_LOOP(index, sampleSize) {
res[index] = T(0);
for (int k = 0; k < numSamples; ++k) {
res[index] += input[k*sampleSize + index];
}
}
}
}
template <class N>
void ConvSubFunction<N>::BiasDerivativeMultiDim(i2t<true>, ValueType* res, ValueType* input, SizeType sampleSize, SizeType numSamples, bool performAddition) {
kernel_BiasDerivativeMultiDim<ValueType> << <LSDN_GET_BLOCKS(sampleSize), LSDN_CUDA_NUM_THREADS >> >(res, input, sampleSize, numSamples, performAddition);
check_cuda_errors(__FILE__, __LINE__);
}
template class ConvSubFunction<Node<double, int, false> >;
template class ConvSubFunction<Node<double, int, true> >;
template class ConvSubFunction<Node<float, int, false> >;
template class ConvSubFunction<Node<float, int, true> >;
| 36e8ce136bfa499c6da007f9e6ab3d444464b671.cu | //Author: Alexander G. Schwing (http://alexander-schwing.de)
#ifdef _MSC_VER
#pragma warning( disable : 4661 )
#endif
#include "Function_ConvSub.h"
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include "../LSDN_CudaCommon.h"
#include "LSDN_mathfunctions.h"
template <typename T>
__global__ void kernel_AccumPatchDiff2ImSubsample(const int num, T* img, int numRow, int numCol, int numChan,
int kNumRow, int kNumCol, int stride, int pad, const T* patchMatrixDiff, int h_out, int w_out, int subsample_h, int subsample_w) {
CUDA_KERNEL_LOOP(index, num) {//loops over channels*numImageRows*numImageCols
T res = 0;
//coordinates in padded grid
int h = index % numRow + pad;// +(kNumRow - 1)*(subsample_h / 2) - (subsample_h > 1)*pad;
int w = (index / numRow) % numCol + pad;// +(kNumCol - 1)*(subsample_w / 2) - (subsample_w > 1)*pad;
int c = index / (numRow * numCol);
//where did pixel (index%numRow, (index/numRow)%numCol) have an influence in the patchmatrix?
//the ranges in patchMatrixDiff that affect the current position (h,w) in img
int h_out_start = (h < kNumRow*subsample_h) ? h%subsample_h : (h - kNumRow*subsample_h) / stride + subsample_h;
int h_out_end = min(h / stride + 1, h_out);
int w_out_start = (w < kNumCol*subsample_w) ? w%subsample_w : (w - kNumCol*subsample_w) / stride + subsample_w;
int w_out_end = min(w / stride + 1, w_out);
for (int w_c = w_out_start; w_c < w_out_end; w_c+=subsample_w) {
for (int h_c = h_out_start; h_c < h_out_end; h_c+=subsample_h) {//height offset for grid in padded dimensions
int c_out = c * kNumRow * kNumCol + ((w - w_c * stride)/subsample_w) * kNumRow + (h - h_c * stride)/subsample_h;
res += patchMatrixDiff[(c_out * w_out + w_c) * h_out + h_c];
}
}
//printf("%f ", patchMatrixDiff[0]);
/*//equivalent implementation, fewer multiplications within for loops
int offset = (c * kNumRow * kNumCol + w * kNumRow + h) * w_out * h_out;
int coeff_h_out = (1 - stride * w_out * h_out);
int coeff_w_out = (1 - stride * kNumRow * w_out) * h_out;
for (int w_c = w_out_start; w_c < w_out_end; ++w_c) {
for (int h_c = h_out_start; h_c < h_out_end; ++h_c) {
res += patchMatrixDiff[offset + w_c*coeff_w_out + h_c*coeff_h_out];
}
}
*/
img[index] = res;
}
}
template <class N>
void ConvSubFunction<N>::AccumPatchDiff2Im(i2t<true>, ValueType* img, SizeType numRow, SizeType numCol, SizeType numChan, SizeType kNumRow, SizeType kNumCol) {
//accumulated the gradients of extracted patches back to img
//SizeType h_out = (numRow + 2 * padSize_ - kNumRow*SubsampleH_ + ((SubsampleH_>1) ? 1 : 0)) / stride_ + 1;
//SizeType w_out = (numCol + 2 * padSize_ - kNumCol*SubsampleW_ + ((SubsampleW_>1) ? 1 : 0)) / stride_ + 1;
SizeType h_out = (numRow + 2 * padSize_ - ((kNumRow - 1)*SubsampleH_ + 1)) / stride_ + 1;
SizeType w_out = (numCol + 2 * padSize_ - ((kNumCol - 1)*SubsampleW_ + 1)) / stride_ + 1;
SizeType num_kernels = numChan * numRow * numCol;
// To avoid involving atomic operations, we launch one kernel per
// input dimension, and then in the kernel add up the output dimensions.
kernel_AccumPatchDiff2ImSubsample<ValueType> << <LSDN_GET_BLOCKS(num_kernels), LSDN_CUDA_NUM_THREADS >> >(int(num_kernels), img,
int(numRow), int(numCol), int(numChan), int(kNumRow), int(kNumCol), int(stride_), int(padSize_),
patchMatrixDiff_, int(h_out), int(w_out), int(SubsampleH_), int(SubsampleW_));
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_Im2PatchesSubsample(const int num, const T* img, int numRow, int numCol,
int kNumRow, int kNumCol, int stride, int pad, T* patchMatrix, int h_out, int w_out, int subsample_h, int subsample_w) {
CUDA_KERNEL_LOOP(index, num) {//loops over channels*h_out*w_out
int h_d = index % h_out;
index /= h_out;
int w_d = index % w_out;//width-destination
int c_s = index / w_out;//channel-source
int c_d = c_s * kNumRow * kNumCol;//channel-destination offset
int h_s = h_d * stride - pad;// -(kNumRow - 1)*(subsample_h / 2) + (subsample_h > 1)*pad;
int w_s = w_d * stride - pad;// -(kNumCol - 1)*(subsample_w / 2) + (subsample_w > 1)*pad;
patchMatrix += (c_d * w_out + w_d) * h_out + h_d;
img += (c_s * numCol + w_s) * numRow + h_s;
for (int c = 0; c < kNumCol; ++c) {
for (int r = 0; r < kNumRow; ++r) {
int h = h_s + r*subsample_h;
int w = w_s + c*subsample_w;
*patchMatrix = (h >= 0 && h<numRow && w >= 0 && w<numCol) ? img[c*subsample_w*numRow + r*subsample_h] : 0;
patchMatrix += w_out * h_out;
}
}
}
}
template <class N>
void ConvSubFunction<N>::Im2Patches(i2t<true>, ValueType* img, SizeType numRow, SizeType numCol, SizeType numChan, SizeType kNumRow, SizeType kNumCol) {
//extract patches of size kNumRow-kNumCol-numChannel from img, and concatenate them into a matrix
//each row stores the column vectorized of each patch
//each kernel copies a single-channel grid (i.e., a kNumRow-kNumCol region)
//SizeType h_out = (numRow + 2 * padSize_ - kNumRow*SubsampleH_ + ((SubsampleH_>1)?1:0)) / stride_ + 1;
//SizeType w_out = (numCol + 2 * padSize_ - kNumCol*SubsampleW_ + ((SubsampleW_>1)?1:0)) / stride_ + 1;
SizeType h_out = (numRow + 2 * padSize_ - ((kNumRow - 1)*SubsampleH_ + 1)) / stride_ + 1;
SizeType w_out = (numCol + 2 * padSize_ - ((kNumCol - 1)*SubsampleW_ + 1)) / stride_ + 1;
SizeType num_kernels = numChan * h_out * w_out;
check_cuda_errors(__FILE__, __LINE__);
kernel_Im2PatchesSubsample<ValueType> << <LSDN_GET_BLOCKS(num_kernels), LSDN_CUDA_NUM_THREADS >> >(int(num_kernels), img,
int(numRow), int(numCol), int(kNumRow), int(kNumCol), int(stride_), int(padSize_),
patchMatrix_, int(h_out), int(w_out), int(SubsampleH_), int(SubsampleW_));
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_AdditionModuloOperand(T* res, int numEl, const T* addend, int op_division, int op_modulo) {
CUDA_KERNEL_LOOP(index, numEl) {
int ix = (index / op_division) % op_modulo;
res[index] += addend[ix];
}
}
template <class N>
void ConvSubFunction<N>::AdditionModuloOperand(i2t<true>, ValueType* res, SizeType numEl, ValueType* addend, SizeType op_division, SizeType op_modulo) {
kernel_AdditionModuloOperand<ValueType> << <LSDN_GET_BLOCKS(numEl), LSDN_CUDA_NUM_THREADS >> >(res, numEl, addend, op_division, op_modulo);
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_BiasDerivativeSingleDim(T* res, const T* input, const int patchSize, const int numSamples, const int numChannels, const int sampleSize, bool performAddition) {
if (performAddition) {
CUDA_KERNEL_LOOP(index, numChannels) {
const T* ptr = input + patchSize*index;
for (int k = 0; k < patchSize*numSamples; ++k) {
int sample = k / patchSize;
int offset = k % patchSize;
res[index] += ptr[sampleSize*sample + offset];
}
}
} else {
CUDA_KERNEL_LOOP(index, numChannels) {
const T* ptr = input + patchSize*index;
res[index] = T(0);
for (int k = 0; k < patchSize*numSamples; ++k) {
int sample = k / patchSize;
int offset = k % patchSize;
res[index] += ptr[sampleSize*sample + offset];
}
}
}
}
template <class N>
void ConvSubFunction<N>::BiasDerivativeSingleDim(i2t<true>, ValueType* res, ValueType* input, SizeType patchSize, SizeType numSamples, SizeType numChannels, bool performAddition) {
kernel_BiasDerivativeSingleDim<ValueType> << <LSDN_GET_BLOCKS(numChannels), LSDN_CUDA_NUM_THREADS >> >(res, input, patchSize, numSamples, numChannels, patchSize*numChannels, performAddition);
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_BiasDerivativeMultiDim(T* res, const T* input, const int sampleSize, const int numSamples, bool performAddition) {
if (performAddition) {
CUDA_KERNEL_LOOP(index, sampleSize) {
for (int k = 0; k < numSamples; ++k) {
res[index] += input[k*sampleSize + index];
}
}
} else {
CUDA_KERNEL_LOOP(index, sampleSize) {
res[index] = T(0);
for (int k = 0; k < numSamples; ++k) {
res[index] += input[k*sampleSize + index];
}
}
}
}
template <class N>
void ConvSubFunction<N>::BiasDerivativeMultiDim(i2t<true>, ValueType* res, ValueType* input, SizeType sampleSize, SizeType numSamples, bool performAddition) {
kernel_BiasDerivativeMultiDim<ValueType> << <LSDN_GET_BLOCKS(sampleSize), LSDN_CUDA_NUM_THREADS >> >(res, input, sampleSize, numSamples, performAddition);
check_cuda_errors(__FILE__, __LINE__);
}
template class ConvSubFunction<Node<double, int, false> >;
template class ConvSubFunction<Node<double, int, true> >;
template class ConvSubFunction<Node<float, int, false> >;
template class ConvSubFunction<Node<float, int, true> >;
|
8969362545eca119c8269a98a8b391ade8044f65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// perform outer product instead of inner product.//
/***
matrix A is stored in shared memory, but matrix B and C are stored in registers.
The outer product does not require sharing of matrix B and matrix C,
therefore, each thread only stores one element of B and one column of the tile of C in the register.
The "computation-to-memory ratio" of the outer product is the same as the inner product.
***/
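// A minimal sketch of the outer-product formulation described above (generic loop
// structure, not this kernel's exact indexing): for C = A*B with A of size MxK and
// B of size KxN,
// for (k = 0; k < K; ++k) // one rank-1 update per k
// for (i = 0; i < M; ++i)
// for (j = 0; j < N; ++j)
// C[i][j] += A[i][k] * B[k][j];
// each k-step reads one column of A (staged in shared memory here) and one row of B,
// while the running C tile stays in registers.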
/*
* 5KK73
* Eindhoven University of Technology
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#include <stdio.h>
#include "../parser.h"
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void
unroll_kernel( float* C, float* A, float* B, int interDim)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As_trans[12];
// As[threadIdx.x * blockDim.y + threadIdx.y] = A[threadIdx.x * blockDim.y + threadIdx.y];
As_trans[threadIdx.y * blockDim.x + threadIdx.x] = A[threadIdx.x * blockDim.y + threadIdx.y]; // use a transpose so indices are closer together, speeding up access [coalescing]
__syncthreads();
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
float cv[12] = {0,0,0,0, \
0,0,0,0, \
0,0,0,0};
// Use an outer loop over the shared dimension in place of the inner-product loop:
// 1. improves shared memory utilization
// 2. simplifies the compute instructions issued by the streaming multiprocessors
#pragma unroll
for (int i = 0 ; i < interDim; ++i) {
cv[threadIdx.x * interDim + threadIdx.y] += B[i * interDim +threadIdx.y] \
* As_trans[i * blockDim.x + threadIdx.x];
}
__syncthreads();
// Write the block sub-matrix to device memory;
// each thread writes one element
C[threadIdx.x * blockDim.y + threadIdx.y] = cv[threadIdx.x * blockDim.y + threadIdx.y];
}
void parser::matmul_unroll( matrix& C) {
float* dev_a;
hipMalloc(&dev_a, A.row * A.col * sizeof(float));
hipMemcpy(dev_a, A.elements, A.row * A.col * sizeof(float), hipMemcpyHostToDevice);
float* dev_b;
hipMalloc(&dev_b, B.row * B.col * sizeof(float));
hipMemcpy(dev_b, B.elements, B.row * B.col * sizeof(float), hipMemcpyHostToDevice);
float* dev_c;
hipMalloc(&dev_c, C.row * C.col * sizeof(float));
dim3 block_size(3,4);
// dim3 grid_size(1);
hipLaunchKernelGGL(( unroll_kernel), dim3(1) , dim3(block_size) , 2 * sizeof(float), 0, dev_c, dev_a, dev_b , 4);
hipDeviceSynchronize();
hipMemcpy(C.elements, dev_c, C.row * C.col * sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return;
} | 8969362545eca119c8269a98a8b391ade8044f65.cu | // perform outer product instead of inner product.//
/***
matrix A is stored in shared memory, but matrix B and C are stored in registers.
The outer product does not require sharing of matrix B and matrix C,
therefore, each thread only stores one element of B and one column of the tile of C in the register.
The "computation-to-memory ratio" of the outer product is the same as the inner product.
***/
/*
* 5KK73
* Eindhoven University of Technology
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#include <stdio.h>
#include "../parser.h"
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void
unroll_kernel( float* C, float* A, float* B, int interDim)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As_trans[12];
// As[threadIdx.x * blockDim.y + threadIdx.y] = A[threadIdx.x * blockDim.y + threadIdx.y];
As_trans[threadIdx.y * blockDim.x + threadIdx.x] = A[threadIdx.x * blockDim.y + threadIdx.y]; // use a transpose so indices are closer together, speeding up access [coalescing]
__syncthreads();
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
float cv[12] = {0,0,0,0, \
0,0,0,0, \
0,0,0,0};
// Use an outer loop over the shared dimension in place of the inner-product loop:
// 1. improves shared memory utilization
// 2. simplifies the compute instructions issued by the streaming multiprocessors
#pragma unroll
for (int i = 0 ; i < interDim; ++i) {
cv[threadIdx.x * interDim + threadIdx.y] += B[i * interDim +threadIdx.y] \
* As_trans[i * blockDim.x + threadIdx.x];
}
__syncthreads();
// Write the block sub-matrix to device memory;
// each thread writes one element
C[threadIdx.x * blockDim.y + threadIdx.y] = cv[threadIdx.x * blockDim.y + threadIdx.y];
}
void parser::matmul_unroll( matrix& C) {
float* dev_a;
cudaMalloc(&dev_a, A.row * A.col * sizeof(float));
cudaMemcpy(dev_a, A.elements, A.row * A.col * sizeof(float), cudaMemcpyHostToDevice);
float* dev_b;
cudaMalloc(&dev_b, B.row * B.col * sizeof(float));
cudaMemcpy(dev_b, B.elements, B.row * B.col * sizeof(float), cudaMemcpyHostToDevice);
float* dev_c;
cudaMalloc(&dev_c, C.row * C.col * sizeof(float));
dim3 block_size(3,4);
// dim3 grid_size(1);
unroll_kernel<<< 1 , block_size , 2 * sizeof(float)>>>(dev_c, dev_a, dev_b , 4);
cudaDeviceSynchronize();
cudaMemcpy(C.elements, dev_c, C.row * C.col * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return;
} |
00a899bcec662a8fb8e786f9a436969611ff19a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "jacketSDK.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <iostream>
#define TPB 64
__global__ void D3Q15_RegBC_LBGK_ts(const float * fIn, float * fOut,
const int * SNL,
const int * VW_nl, const float * VW_uz,
const int * PE_nl, const float * rho_out,
const float omega,
const int Nx, const int Ny, const int Nz)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int nnodes=Nx*Ny*Nz;
if(tid<nnodes){
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14;
float cu;
float w;
//load the data into registers
f0=fIn[tid]; f1=fIn[nnodes+tid];
f2=fIn[2*nnodes+tid]; f3=fIn[3*nnodes+tid];
f4=fIn[4*nnodes+tid]; f5=fIn[5*nnodes+tid];
f6=fIn[6*nnodes+tid]; f7=fIn[7*nnodes+tid];
f8=fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12=fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid];
//compute density and velocity
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14;
float ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho;
float uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy/=rho;
float uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz/=rho;
//take appropriate action if on PE_nl or VW_nl
if(VW_nl[tid]==1){
ux=0;uy=0; uz=VW_uz[tid];
//set rho based on uz
rho = (1./(1.-uz))*(2.0*(f6+f11+f12+f13+f14)+(f0+f1+f2+f3+f4));
}
if(PE_nl[tid]==1){
ux=0.; uy=0.; rho=rho_out[tid];
uz = -1.+((2.*(f5+f7+f8+f9+f10)+(f0+f1+f2+f3+f4)))/rho;
}
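// Hedged derivation of the two closures above: with S0 = f0+f1+f2+f3+f4 (e_z = 0),
// S+ = f5+f7+f8+f9+f10 (e_z = +1) and S- = f6+f11+f12+f13+f14 (e_z = -1), mass and
// z-momentum give rho = S0 + S+ + S- and rho*uz = S+ - S-. Eliminating the unknown
// incoming set yields rho = (S0 + 2*S-)/(1 - uz) at the velocity inlet (VW_nl) and
// uz = -1 + (S0 + 2*S+)/rho at the pressure outlet (PE_nl), matching the expressions used.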
if(SNL[tid]==1){
ux=0.; uy=0.; uz=0.;
}
//everyone compute equilibrium
float fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14;
//speed 0 ex=ey=ez=0 w=2./9.
fe0=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1 ex=1 ey=ez=0 w=1./9.
cu=3.*(1.*ux);
fe1=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 2 ex=-1 ey=ez=0 w=1./9.
cu=3.*((-1.)*ux);
fe2=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 ex=0 ey=1 ez=0 w=1./9.
cu=3.*(1.*uy);
fe3=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 ex=0 ey=-1 ez=0 w=1./9.
cu=3.*(-1.*uy);
fe4=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 ex=ey=0 ez=1 w=1./9.
cu=3.*(1.*uz);
fe5=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 ex=ey=0 ez=-1 w=1./9.
cu=3.*(-1.*uz);
fe6=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 ex=ey=ez=1 w=1./72.
cu=3.*(ux+uy+uz);
fe7=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 ex=-1 ey=ez=1 w=1./72.
cu=3.*(-ux+uy+uz);
fe8=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 ex=1 ey=-1 ez=1 w=1./72.
cu=3.*(ux-uy+uz);
fe9=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 ex=-1 ey=-1 ez=1 w=1/72
cu=3.*(-ux-uy+uz);
fe10=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 ex=1 ey=1 ez=-1 w=1/72
cu=3.*(ux+uy-uz);
fe11=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 ex=-1 ey=1 ez=-1 w=1/72
cu=3.*(-ux+uy-uz);
fe12=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 ex=1 ey=ez=-1 w=1/72
cu=3.*(ux-uy-uz);
fe13=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 ex=ey=ez=-1 w=1/72
cu=3.*(-ux-uy-uz);
fe14=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
if((VW_nl[tid]==1)|(PE_nl[tid]==1)){
float ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14;
if(VW_nl[tid]==1){
//adjust fIn for the unknown velocities: 5,7,8,9,10
//bounce-back of non-equilibrium parts
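// Hedged note on the reconstruction below: each unknown (incoming) population is set to
// its equilibrium value plus the non-equilibrium part of the opposite-direction population,
// f_i = fe_i + (f_opp - fe_opp); the regularization step further down then projects all
// non-equilibrium parts onto the second-order moments (the ft#*Q_flat products).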
//f5, bb_spd=f6
f5=fe5+(f6-fe6); //fIn[5*nnodes+tid]=f5;
//f7, bb_spd=f14
f7=fe7+(f14-fe14); //fIn[7*nnodes+tid]=f7;
//f8, bb_spd=f13
f8=fe8+(f13-fe13); //fIn[8*nnodes+tid]=f8;
//f9, bb_spd=f12
f9=fe9+(f12-fe12); //fIn[9*nnodes+tid]=f9;
//f10, bb_spd=f11
f10=fe10+(f11-fe11); //fIn[10*nnodes+tid]=f10;
}else{
f6=fe6+(f5-fe5);
f11=fe11+(f10-fe10);
f12=fe12+(f9-fe9);
f13=fe13+(f8-fe8);
f14=fe14+(f7-fe7);
}
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
//now, multiply by f# = ((ft#)*Q_flat)*Q_flat'
f0=0;
f1=ft1+ft2+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f2=f1;
f3=ft3+ft4+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f4=f3;
f5=ft5+ft6+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f6=f5;
f7=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
f8=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f9=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f10=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f11=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f12=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f13=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f14=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
//update fIn for all velocities based on strain tensor
//f0, still equals 0..
cu = 9./2.; w = 1./9.;
//fIn[..] = fe#+f#
f0=fe0;
f1=fe1+f1*(cu)*w;
f2=fe2+f2*(cu)*w;
f3=fe3+f3*cu*w;
f4=fe4+f4*cu*w;
f5=fe5+f5*cu*w;
f6=fe6+f6*cu*w;
w = 1./72.;
f7=fe7+f7*cu*w;
f8=fe8+f8*cu*w;
f9=fe9+f9*cu*w;
f10=fe10+f10*cu*w;
f11=fe11+f11*cu*w;
f12=fe12+f12*cu*w;
f13=fe13+f13*cu*w;
f14=fe14+f14*cu*w;
}
//everyone relax...
f0=f0-omega*(f0-fe0);
f1=f1-omega*(f1-fe1);
f2=f2-omega*(f2-fe2);
f3=f3-omega*(f3-fe3);
f4=f4-omega*(f4-fe4);
f5=f5-omega*(f5-fe5);
f6=f6-omega*(f6-fe6);
f7=f7-omega*(f7-fe7);
f8=f8-omega*(f8-fe8);
f9=f9-omega*(f9-fe9);
f10=f10-omega*(f10-fe10);
f11=f11-omega*(f11-fe11);
f12=f12-omega*(f12-fe12);
f13=f13-omega*(f13-fe13);
f14=f14-omega*(f14-fe14);
//now, everybody streams...
int X_t, Y_t, Z_t;
int tid_t;
int Z = tid/(Nx*Ny);
int Y = (tid - Z*Nx*Ny)/Nx;
int X = tid - Z*Nx*Ny - Y*Nx;
//speed 0 ex=ey=ez=0
fOut[tid]=f0;
//speed 1 ex=1 ey=ez=0
X_t=X+1; Y_t=Y; Z_t=Z;
if(X_t==Nx) X_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[Nx*Ny*Nz+tid_t]=f1;
//speed 2 ex=-1 ey=ez=0;
X_t=X-1; Y_t=Y; Z_t=Z;
if(X_t<0) X_t=(Nx-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[2*Nx*Ny*Nz+tid_t]=f2;
//speed 3 ex=0 ey=1 ez=0
X_t=X; Y_t=Y+1; Z_t=Z;
if(Y_t==Ny) Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[3*Nx*Ny*Nz+tid_t]=f3;
//speed 4 ex=0 ey=-1 ez=0
X_t=X; Y_t=Y-1; Z_t=Z;
if(Y_t<0) Y_t=(Ny-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[4*Nx*Ny*Nz+tid_t]=f4;
//speed 5 ex=ey=0 ez=1
X_t=X; Y_t=Y; Z_t=Z+1;
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[5*Nx*Ny*Nz+tid_t]=f5;
//speed 6 ex=ey=0 ez=-1
X_t=X; Y_t=Y; Z_t=Z-1;
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[6*Nx*Ny*Nz+tid_t]=f6;
//speed 7 ex=ey=ez=1
X_t=X+1; Y_t=Y+1; Z_t=Z+1;
if(X_t==Nx) X_t=0;
if(Y_t==Ny) Y_t=0;
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[7*Nx*Ny*Nz+tid_t]=f7;
//speed 8 ex=-1 ey=1 ez=1
X_t=X-1; Y_t=Y+1; Z_t=Z+1;
if(X_t<0) X_t=(Nx-1);
if(Y_t==Ny) Y_t=0;
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[8*Nx*Ny*Nz+tid_t]=f8;
//speed 9 ex=1 ey=-1 ez=1
X_t=X+1; Y_t=Y-1; Z_t=Z+1;
if(X_t==Nx) X_t=0;
if(Y_t<0) Y_t=(Ny-1);
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[9*Nx*Ny*Nz+tid_t]=f9;
//speed 10 ex=-1 ey=-1 ez=1
X_t=X-1; Y_t=Y-1; Z_t=Z+1;
if(X_t<0) X_t=(Nx-1);
if(Y_t<0) Y_t=(Ny-1);
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[10*Nx*Ny*Nz+tid_t]=f10;
//speed 11 ex=1 ey=1 ez=-1
X_t=X+1; Y_t=Y+1; Z_t=Z-1;
if(X_t==Nx) X_t=0;
if(Y_t==Ny) Y_t=0;
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[11*Nx*Ny*Nz+tid_t]=f11;
//speed 12 ex=-1 ey=1 ez=-1
X_t=X-1; Y_t=Y+1; Z_t=Z-1;
if(X_t<0) X_t=(Nx-1);
if(Y_t==Ny) Y_t=0;
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[12*Nx*Ny*Nz+tid_t]=f12;
//speed 13 ex=1 ey=-1 ez=-1
X_t=X+1; Y_t=Y-1; Z_t=Z-1;
if(X_t==Nx) X_t=0;
if(Y_t<0) Y_t=(Ny-1);
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[13*Nx*Ny*Nz+tid_t]=f13;
//speed 14 ex=ey=ez=-1
X_t=X-1; Y_t=Y-1; Z_t=Z-1;
if(X_t<0) X_t=(Nx-1);
if(Y_t<0) Y_t=(Ny-1);
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[14*Nx*Ny*Nz+tid_t]=f14;
}
}
err_t jktFunction(int nlhs,mxArray * plhs[],int nrhs,mxArray * prhs[]){
if(nrhs!=11)
return err("Usage:D3Q15_RegBC_LBGK_ts(fIn,fOut,SNL,VW_nl,VW_uz,PE_nl,rho_out,omega,Nx,Ny,Nz)");
mxArray * m_fIn = prhs[0];
mxArray * m_fOut = prhs[1];
mxArray * m_SNL=prhs[2];
mxArray * m_VW_nl = prhs[3];
mxArray * m_VW_uz = prhs[4];
mxArray * m_PE_nl = prhs[5];
mxArray * m_rho_out = prhs[6];
float omega = mxGetScalar(prhs[7]);
int Nx = mxGetScalar(prhs[8]);
int Ny = mxGetScalar(prhs[9]);
int Nz = mxGetScalar(prhs[10]);
float * fIn;
float * fOut;
int * SNL;
int * VW_nl;
float * VW_uz;
int * PE_nl;
float * rho_out;
jkt_mem((void**)&fIn,m_fIn);
jkt_mem((void**)&fOut,m_fOut);
jkt_mem((void**)&SNL,m_SNL);
jkt_mem((void**)&VW_nl,m_VW_nl);
jkt_mem((void**)&VW_uz,m_VW_uz);
jkt_mem((void**)&PE_nl,m_PE_nl);
jkt_mem((void**)&rho_out,m_rho_out);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((Nx*Ny*Nz+TPB-1)/TPB,1,1);
hipLaunchKernelGGL(( D3Q15_RegBC_LBGK_ts), dim3(GRIDS),dim3(BLOCKS), 0, 0, fIn,fOut,SNL,VW_nl,VW_uz,PE_nl,rho_out,
omega,Nx,Ny,Nz);
return errNone;
}
| 00a899bcec662a8fb8e786f9a436969611ff19a2.cu | #include "jacketSDK.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#include <iostream>
#define TPB 64
__global__ void D3Q15_RegBC_LBGK_ts(const float * fIn, float * fOut,
const int * SNL,
const int * VW_nl, const float * VW_uz,
const int * PE_nl, const float * rho_out,
const float omega,
const int Nx, const int Ny, const int Nz)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int nnodes=Nx*Ny*Nz;
if(tid<nnodes){
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14;
float cu;
float w;
//load the data into registers
f0=fIn[tid]; f1=fIn[nnodes+tid];
f2=fIn[2*nnodes+tid]; f3=fIn[3*nnodes+tid];
f4=fIn[4*nnodes+tid]; f5=fIn[5*nnodes+tid];
f6=fIn[6*nnodes+tid]; f7=fIn[7*nnodes+tid];
f8=fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12=fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid];
//compute density and velocity
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14;
float ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho;
float uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy/=rho;
float uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz/=rho;
//take appropriate action if on PE_nl or VW_nl
if(VW_nl[tid]==1){
ux=0;uy=0; uz=VW_uz[tid];
//set rho based on uz
rho = (1./(1.-uz))*(2.0*(f6+f11+f12+f13+f14)+(f0+f1+f2+f3+f4));
}
if(PE_nl[tid]==1){
ux=0.; uy=0.; rho=rho_out[tid];
uz = -1.+((2.*(f5+f7+f8+f9+f10)+(f0+f1+f2+f3+f4)))/rho;
}
if(SNL[tid]==1){
ux=0.; uy=0.; uz=0.;
}
//everyone compute equilibrium
float fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14;
//speed 0 ex=ey=ez=0 w=2./9.
fe0=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1 ex=1 ey=ez=0 w=1./9.
cu=3.*(1.*ux);
fe1=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 2 ex=-1 ey=ez=0 w=1./9.
cu=3.*((-1.)*ux);
fe2=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 ex=0 ey=1 ez=0 w=1./9.
cu=3.*(1.*uy);
fe3=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 ex=0 ey=-1 ez=0 w=1./9.
cu=3.*(-1.*uy);
fe4=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 ex=ey=0 ez=1 w=1./9.
cu=3.*(1.*uz);
fe5=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 ex=ey=0 ez=-1 w=1./9.
cu=3.*(-1.*uz);
fe6=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 ex=ey=ez=1 w=1./72.
cu=3.*(ux+uy+uz);
fe7=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 ex=-1 ey=ez=1 w=1./72.
cu=3.*(-ux+uy+uz);
fe8=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 ex=1 ey=-1 ez=1 w=1./72.
cu=3.*(ux-uy+uz);
fe9=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 ex=-1 ey=-1 ez=1 w=1/72
cu=3.*(-ux-uy+uz);
fe10=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 ex=1 ey=1 ez=-1 w=1/72
cu=3.*(ux+uy-uz);
fe11=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 ex=-1 ey=1 ez=-1 w=1/72
cu=3.*(-ux+uy-uz);
fe12=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 ex=1 ey=ez=-1 w=1/72
cu=3.*(ux-uy-uz);
fe13=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 ex=ey=ez=-1 w=1/72
cu=3.*(-ux-uy-uz);
fe14=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
if((VW_nl[tid]==1)|(PE_nl[tid]==1)){
float ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14;
if(VW_nl[tid]==1){
//adjust fIn for the unknown velocities: 5,7,8,9,10
//bounce-back of non-equilibrium parts
//f5, bb_spd=f6
f5=fe5+(f6-fe6); //fIn[5*nnodes+tid]=f5;
//f7, bb_spd=f14
f7=fe7+(f14-fe14); //fIn[7*nnodes+tid]=f7;
//f8, bb_spd=f13
f8=fe8+(f13-fe13); //fIn[8*nnodes+tid]=f8;
//f9, bb_spd=f12
f9=fe9+(f12-fe12); //fIn[9*nnodes+tid]=f9;
//f10, bb_spd=f11
f10=fe10+(f11-fe11); //fIn[10*nnodes+tid]=f10;
}else{
f6=fe6+(f5-fe5);
f11=fe11+(f10-fe10);
f12=fe12+(f9-fe9);
f13=fe13+(f8-fe8);
f14=fe14+(f7-fe7);
}
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
//now, multiply by f# = ((ft#)*Q_flat)*Q_flat'
f0=0;
f1=ft1+ft2+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f2=f1;
f3=ft3+ft4+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f4=f3;
f5=ft5+ft6+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f6=f5;
f7=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
f8=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f9=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f10=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f11=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f12=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f13=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f14=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
//update fIn for all velocities based on strain tensor
//f0, still equals 0..
cu = 9./2.; w = 1./9.;
//fIn[..] = fe#+f#
f0=fe0;
f1=fe1+f1*(cu)*w;
f2=fe2+f2*(cu)*w;
f3=fe3+f3*cu*w;
f4=fe4+f4*cu*w;
f5=fe5+f5*cu*w;
f6=fe6+f6*cu*w;
w = 1./72.;
f7=fe7+f7*cu*w;
f8=fe8+f8*cu*w;
f9=fe9+f9*cu*w;
f10=fe10+f10*cu*w;
f11=fe11+f11*cu*w;
f12=fe12+f12*cu*w;
f13=fe13+f13*cu*w;
f14=fe14+f14*cu*w;
}
//everyone relax...
f0=f0-omega*(f0-fe0);
f1=f1-omega*(f1-fe1);
f2=f2-omega*(f2-fe2);
f3=f3-omega*(f3-fe3);
f4=f4-omega*(f4-fe4);
f5=f5-omega*(f5-fe5);
f6=f6-omega*(f6-fe6);
f7=f7-omega*(f7-fe7);
f8=f8-omega*(f8-fe8);
f9=f9-omega*(f9-fe9);
f10=f10-omega*(f10-fe10);
f11=f11-omega*(f11-fe11);
f12=f12-omega*(f12-fe12);
f13=f13-omega*(f13-fe13);
f14=f14-omega*(f14-fe14);
//now, everybody streams...
int X_t, Y_t, Z_t;
int tid_t;
int Z = tid/(Nx*Ny);
int Y = (tid - Z*Nx*Ny)/Nx;
int X = tid - Z*Nx*Ny - Y*Nx;
//speed 0 ex=ey=ez=0
fOut[tid]=f0;
//speed 1 ex=1 ey=ez=0
X_t=X+1; Y_t=Y; Z_t=Z;
if(X_t==Nx) X_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[Nx*Ny*Nz+tid_t]=f1;
//speed 2 ex=-1 ey=ez=0;
X_t=X-1; Y_t=Y; Z_t=Z;
if(X_t<0) X_t=(Nx-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[2*Nx*Ny*Nz+tid_t]=f2;
//speed 3 ex=0 ey=1 ez=0
X_t=X; Y_t=Y+1; Z_t=Z;
if(Y_t==Ny) Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[3*Nx*Ny*Nz+tid_t]=f3;
//speed 4 ex=0 ey=-1 ez=0
X_t=X; Y_t=Y-1; Z_t=Z;
if(Y_t<0) Y_t=(Ny-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[4*Nx*Ny*Nz+tid_t]=f4;
//speed 5 ex=ey=0 ez=1
X_t=X; Y_t=Y; Z_t=Z+1;
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[5*Nx*Ny*Nz+tid_t]=f5;
//speed 6 ex=ey=0 ez=-1
X_t=X; Y_t=Y; Z_t=Z-1;
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[6*Nx*Ny*Nz+tid_t]=f6;
//speed 7 ex=ey=ez=1
X_t=X+1; Y_t=Y+1; Z_t=Z+1;
if(X_t==Nx) X_t=0;
if(Y_t==Ny) Y_t=0;
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[7*Nx*Ny*Nz+tid_t]=f7;
//speed 8 ex=-1 ey=1 ez=1
X_t=X-1; Y_t=Y+1; Z_t=Z+1;
if(X_t<0) X_t=(Nx-1);
if(Y_t==Ny) Y_t=0;
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[8*Nx*Ny*Nz+tid_t]=f8;
//speed 9 ex=1 ey=-1 ez=1
X_t=X+1; Y_t=Y-1; Z_t=Z+1;
if(X_t==Nx) X_t=0;
if(Y_t<0) Y_t=(Ny-1);
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[9*Nx*Ny*Nz+tid_t]=f9;
//speed 10 ex=-1 ey=-1 ez=1
X_t=X-1; Y_t=Y-1; Z_t=Z+1;
if(X_t<0) X_t=(Nx-1);
if(Y_t<0) Y_t=(Ny-1);
if(Z_t==Nz) Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[10*Nx*Ny*Nz+tid_t]=f10;
//speed 11 ex=1 ey=1 ez=-1
X_t=X+1; Y_t=Y+1; Z_t=Z-1;
if(X_t==Nx) X_t=0;
if(Y_t==Ny) Y_t=0;
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[11*Nx*Ny*Nz+tid_t]=f11;
//speed 12 ex=-1 ey=1 ez=-1
X_t=X-1; Y_t=Y+1; Z_t=Z-1;
if(X_t<0) X_t=(Nx-1);
if(Y_t==Ny) Y_t=0;
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[12*Nx*Ny*Nz+tid_t]=f12;
//speed 13 ex=1 ey=-1 ez=-1
X_t=X+1; Y_t=Y-1; Z_t=Z-1;
if(X_t==Nx) X_t=0;
if(Y_t<0) Y_t=(Ny-1);
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[13*Nx*Ny*Nz+tid_t]=f13;
//speed 14 ex=ey=ez=-1
X_t=X-1; Y_t=Y-1; Z_t=Z-1;
if(X_t<0) X_t=(Nx-1);
if(Y_t<0) Y_t=(Ny-1);
if(Z_t<0) Z_t=(Nz-1);
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[14*Nx*Ny*Nz+tid_t]=f14;
}
}
err_t jktFunction(int nlhs,mxArray * plhs[],int nrhs,mxArray * prhs[]){
if(nrhs!=11)
return err("Usage:D3Q15_RegBC_LBGK_ts(fIn,fOut,SNL,VW_nl,VW_uz,PE_nl,rho_out,omega,Nx,Ny,Nz)");
mxArray * m_fIn = prhs[0];
mxArray * m_fOut = prhs[1];
mxArray * m_SNL=prhs[2];
mxArray * m_VW_nl = prhs[3];
mxArray * m_VW_uz = prhs[4];
mxArray * m_PE_nl = prhs[5];
mxArray * m_rho_out = prhs[6];
float omega = mxGetScalar(prhs[7]);
int Nx = mxGetScalar(prhs[8]);
int Ny = mxGetScalar(prhs[9]);
int Nz = mxGetScalar(prhs[10]);
float * fIn;
float * fOut;
int * SNL;
int * VW_nl;
float * VW_uz;
int * PE_nl;
float * rho_out;
jkt_mem((void**)&fIn,m_fIn);
jkt_mem((void**)&fOut,m_fOut);
jkt_mem((void**)&SNL,m_SNL);
jkt_mem((void**)&VW_nl,m_VW_nl);
jkt_mem((void**)&VW_uz,m_VW_uz);
jkt_mem((void**)&PE_nl,m_PE_nl);
jkt_mem((void**)&rho_out,m_rho_out);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((Nx*Ny*Nz+TPB-1)/TPB,1,1);
D3Q15_RegBC_LBGK_ts<<<GRIDS,BLOCKS>>>(fIn,fOut,SNL,VW_nl,VW_uz,PE_nl,rho_out,
omega,Nx,Ny,Nz);
return errNone;
}
|
fcd2a37cd6ce2edd2b36a0eb10ecf148b0a6897d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void BiasForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* bias_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
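// Hedged reading: with channel_shared_, div_factor == channels so c collapses to 0
// (a single bias value); otherwise div_factor == 1 and c is the ordinary channel index.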
out[index] = in[index] + bias_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void BiasBackward(const int n, const Dtype* in_diff, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index];
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void BiasParamBackward(const int n, const Dtype* in_diff, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index];
}
}
template <typename Dtype>
void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* bias_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BiasForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, bias_data, div_factor);
CUDA_POST_KERNEL_CHECK;
//LOG(INFO) << "gpu:: bias layer";
}
template <typename Dtype>
void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// Propagate to param
if (this->param_propagate_down_[0]) {
Dtype* bias_diff = this->blobs_[0]->mutable_gpu_diff();
    // bias_diff accumulates over the batch; the explicit zeroing below is left disabled
//caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), bias_diff);
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
Dtype* temp_buff = multiplier_.mutable_gpu_diff();
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BiasParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, top_diff + top[0]->offset(n), multiplier_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
      // Reduce the element-wise diff via the multiplier vector: a dot product
      // collapses it to a scalar when the bias is shared across channels;
      // otherwise a gemv sums over the spatial dim per channel into bias_diff.
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., bias_diff);
}
} //end for loop
if (channel_shared_) {
//caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), bias_diff);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), bias_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BiasBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer);
} // namespace caffe
| fcd2a37cd6ce2edd2b36a0eb10ecf148b0a6897d.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void BiasForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* bias_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] + bias_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void BiasBackward(const int n, const Dtype* in_diff, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index];
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void BiasParamBackward(const int n, const Dtype* in_diff, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index];
}
}
template <typename Dtype>
void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* bias_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
BiasForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, bias_data, div_factor);
CUDA_POST_KERNEL_CHECK;
//LOG(INFO) << "gpu:: bias layer";
}
template <typename Dtype>
void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// Propagate to param
if (this->param_propagate_down_[0]) {
Dtype* bias_diff = this->blobs_[0]->mutable_gpu_diff();
    // bias_diff accumulates over the batch; the explicit zeroing below is left disabled
//caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), bias_diff);
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
Dtype* temp_buff = multiplier_.mutable_gpu_diff();
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
BiasParamBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
cdim, top_diff + top[0]->offset(n), multiplier_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
      // Reduce the element-wise diff via the multiplier vector: a dot product
      // collapses it to a scalar when the bias is shared across channels;
      // otherwise a gemv sums over the spatial dim per channel into bias_diff.
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., bias_diff);
}
} //end for loop
if (channel_shared_) {
//caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), bias_diff);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), bias_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
BiasBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer);
} // namespace caffe
|
99206304004d9b3beed891876dba4e16f51ad374.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/fused/attn_feed_forward.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/float16.h"
namespace framework = paddle::framework;
namespace platform = paddle::platform;
USE_OP(matmul);
USE_OP(elementwise_add);
// get paddle matmul op results as baseline
template <typename T>
void GetLinearOp(const std::vector<T> &x, const std::vector<T> &y,
const framework::DDim &x_dim, const framework::DDim &y_dim,
const platform::CUDADeviceContext &ctx, bool transpose_a,
bool transpose_b, float alpha, std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_out->Resize({x_dim[0], x_dim[1], y_dim[0]});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(framework::product(x_dim));
auto size_y = static_cast<size_t>(framework::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
hipMemcpy(x_ptr, x.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
auto op = framework::OpRegistry::CreateOp(
"matmul", {{"X", {"X"}}, {"Y", {"Y"}}}, {{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(out->data(), z_ptr, size_z * sizeof(T), hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add op results as baseline
template <typename T>
void GetElementwiseAddOp(const std::vector<T> &x, const std::vector<T> &y,
const int bsz_seq, const int output_size,
const platform::CUDADeviceContext &ctx,
std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_out->Resize({bsz_seq, output_size});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = bsz_seq * output_size;
auto size_y = output_size;
auto size_z = bsz_seq * output_size;
hipMemcpy(x_ptr, x.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
framework::AttributeMap attrs;
auto op = framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"X"}}, {"Y", {"Y"}}},
{{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(out->data(), z_ptr, size_z * sizeof(T), hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle matmul_grad op results as baseline
template <typename T>
void GetLinearOpGrad(const std::vector<T> &x_vec, const std::vector<T> &y_vec,
const std::vector<T> &dout_vec,
const framework::DDim &x_dim, const framework::DDim &y_dim,
const framework::DDim &out_dim,
const platform::CUDADeviceContext &ctx, bool transpose_a,
bool transpose_b, float alpha, std::vector<T> *dinput_vec,
std::vector<T> *dweight_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_dout->Resize(out_dim);
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize(x_dim);
tensor_dy->Resize(y_dim);
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto dinput_ptr = tensor_dx->mutable_data<T>(ctx.GetPlace());
auto dweight_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(framework::product(x_dim));
auto size_y = static_cast<size_t>(framework::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
hipMemcpy(x_ptr, x_vec.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y_vec.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
hipMemcpyHostToDevice);
bool use_mkldnn = false;
std::vector<int> fused_reshape_X = {};
std::vector<int> fused_reshape_Y = {};
std::vector<int> fused_reshape_Out = {};
std::vector<int> fused_transpose_X = {};
std::vector<int> fused_transpose_Y = {};
std::vector<int> fused_transpose_Out = {};
bool use_quantizer = false, force_fp32_output = false;
std::string mkldnn_data_type = "float32";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"fused_reshape_X", fused_reshape_X});
attrs.insert({"fused_reshape_Y", fused_reshape_Y});
attrs.insert({"fused_reshape_Out", fused_reshape_Out});
attrs.insert({"fused_transpose_X", fused_transpose_X});
attrs.insert({"fused_transpose_Y", fused_transpose_Y});
attrs.insert({"fused_transpose_Out", fused_transpose_Out});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
attrs.insert({"force_fp32_output", force_fp32_output});
auto op = framework::OpRegistry::CreateOp(
"matmul_grad", {{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(dinput_vec->data(), dinput_ptr, size_x * sizeof(T),
hipMemcpyDeviceToHost);
hipMemcpy(dweight_vec->data(), dweight_ptr, size_y * sizeof(T),
hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add_grad op results as baseline
template <typename T>
void GetElementwiseAddOpGrad(const std::vector<T> &dout_vec, const int bsz_seq,
const int output_size,
const platform::CUDADeviceContext &ctx,
std::vector<T> *dy_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_dout->Resize({bsz_seq, output_size});
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize({bsz_seq, output_size});
tensor_dy->Resize({output_size});
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto tensor_dy_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_z = static_cast<size_t>(bsz_seq * output_size);
hipMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
hipMemcpyHostToDevice);
int axis = -1;
bool use_mkldnn = false, use_quantizer = false;
std::string mkldnn_data_type = "float32";
std::string x_data_format = "", y_data_format = "";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"axis", axis});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"x_data_format", x_data_format});
attrs.insert({"y_data_format", y_data_format});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
auto op = framework::OpRegistry::CreateOp(
"elementwise_add_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
op->Run(scope, ctx.GetPlace());
auto size_y = static_cast<size_t>(output_size);
hipMemcpy(dy_vec->data(), tensor_dy_ptr, size_y * sizeof(T),
hipMemcpyDeviceToHost);
ctx.Wait();
}
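// Test harness: runs the fused FeedForward op (matmul + optional bias add) and
// compares its forward and backward results against the separate matmul and
// elementwise_add baseline ops defined above.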
template <typename T>
class TestFeedForward {
public:
TestFeedForward() {
batch_size_ = 16;
seq_len_ = 128;
num_head_ = 16;
dim_head_ = 64;
dim_embed_ = 1024;
has_bias_ = false;
}
TestFeedForward(int batch_size, int seq_len, int num_head, int dim_head,
int dim_embed, bool has_bias) {
batch_size_ = batch_size;
seq_len_ = seq_len;
num_head_ = num_head;
dim_head_ = dim_head;
dim_embed_ = dim_embed;
has_bias_ = has_bias;
}
~TestFeedForward() { delete ctx_; }
void SetUp() {
bsz_seq_ = batch_size_ * seq_len_;
output_size_ = 3 * num_head_ * dim_head_;
input_size_ = dim_embed_;
ctx_ = new platform::CUDADeviceContext(place_);
size_src_ = bsz_seq_ * dim_embed_; // src: [bs, seq_len, em_dim]
size_weight_ = dim_embed_ * output_size_; // weight: [output_size, em_dim]
size_output_ =
bsz_seq_ * output_size_; // output: [bs, seq_len, output_size]
size_bias_ = output_size_;
base_out_vec_.resize(size_output_);
base_bias_out_vec_.resize(size_output_);
base_dinput_vec_.resize(size_src_);
base_dweight_vec_.resize(size_weight_);
base_dbias_vec_.resize(size_bias_);
src_vec_.resize(size_src_);
weight_vec_.resize(size_weight_);
bias_vec_.resize(size_bias_);
doutput_vec_.resize(size_output_);
std::default_random_engine random(time(NULL));
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < size_src_; i++) {
src_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_weight_; i++) {
weight_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_bias_; i++) {
bias_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_output_; i++) {
doutput_vec_[i] = static_cast<T>(dis(random));
}
framework::TensorFromVector<T>(src_vec_, *ctx_, &src_);
src_.Resize({batch_size_, seq_len_, dim_embed_});
framework::TensorFromVector<T>(weight_vec_, *ctx_, &weight_);
weight_.Resize({output_size_, dim_embed_});
out_.Resize({batch_size_, seq_len_, output_size_});
out_.mutable_data<T>(place_);
if (has_bias_) {
framework::TensorFromVector<T>(bias_vec_, *ctx_, &bias_);
bias_.Resize({output_size_});
bias_out_.Resize({batch_size_, seq_len_, output_size_});
bias_out_.mutable_data<T>(place_);
}
framework::TensorFromVector<T>(doutput_vec_, *ctx_, &doutput_);
doutput_.Resize({batch_size_, seq_len_, output_size_});
dinput_.Resize({batch_size_, seq_len_, dim_embed_});
dinput_.mutable_data<T>(place_);
dweight_.Resize({output_size_, dim_embed_});
dweight_.mutable_data<T>(place_);
if (has_bias_) {
dbias_.Resize({output_size_});
dbias_.mutable_data<T>(place_);
}
}
void BaselineForward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOp(src_vec_, weight_vec_, src_.dims(), weight_.dims(), *ctx_,
transpose_a, transpose_b, alpha, &base_out_vec_);
if (has_bias_) {
GetElementwiseAddOp(base_out_vec_, bias_vec_, bsz_seq_, output_size_,
*ctx_, &base_bias_out_vec_);
}
ctx_->Wait();
}
// get forward results of feedforward.
void FusedForward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_output = out_.data<T>();
T *p_bias = nullptr;
T *p_bias_output = nullptr;
if (has_bias_) {
p_bias = bias_.data<T>();
p_bias_output = bias_out_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeForward(p_weight, p_src, p_bias, p_output,
p_bias_output);
ctx_->Wait();
}
void BaselineBackward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOpGrad(src_vec_, weight_vec_, doutput_vec_, src_.dims(),
weight_.dims(), out_.dims(), *ctx_, transpose_a,
transpose_b, alpha, &base_dinput_vec_, &base_dweight_vec_);
if (has_bias_) {
GetElementwiseAddOpGrad(doutput_vec_, bsz_seq_, output_size_, *ctx_,
&base_dbias_vec_);
}
ctx_->Wait();
}
// get backward results of feedforward.
void FusedBackward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_doutput = doutput_.data<T>();
T *p_dinput = dinput_.data<T>();
T *p_dweight = dweight_.data<T>();
T *bias_ptr = nullptr;
if (has_bias_) {
bias_ptr = dbias_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeBackward(p_src, p_weight, p_doutput, p_dinput, p_dweight,
bias_ptr);
ctx_->Wait();
}
void Run() {
SetUp();
BaselineForward();
FusedForward();
BaselineBackward();
FusedBackward();
}
// check forward correctness between baseline and results of feedforward.
void CheckOut(const T diff, bool is_relative_atol = false) {
std::vector<T> out(size_output_);
std::vector<T> bias_out(size_output_);
TensorToVector(out_, *ctx_, &out);
if (has_bias_) {
TensorToVector(bias_out_, *ctx_, &bias_out);
}
ctx_->Wait();
for (int i = 0; i < size_output_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((out[i] - base_out_vec_[i]) / base_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(out[i] - base_out_vec_[i]), diff);
}
if (has_bias_) {
if (is_relative_atol) {
EXPECT_LT(std::abs((bias_out[i] - base_bias_out_vec_[i]) /
base_bias_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(bias_out[i] - base_bias_out_vec_[i]), diff);
}
}
}
}
// check backward correctness between baseline and results of feedforward.
void CheckGrad(const T diff, bool is_relative_atol = false) {
std::vector<T> h_dinput(size_src_);
TensorToVector(dinput_, *ctx_, &h_dinput);
for (int i = 0; i < size_src_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dinput[i] - base_dinput_vec_[i]) / base_dinput_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dinput[i] - base_dinput_vec_[i]), diff);
}
}
std::vector<T> h_dweight(size_weight_);
TensorToVector(dweight_, *ctx_, &h_dweight);
for (int i = 0; i < size_weight_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((h_dweight[i] - base_dweight_vec_[i]) /
base_dweight_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dweight[i] - base_dweight_vec_[i]), diff);
}
}
if (has_bias_) {
std::vector<T> h_dbias(size_bias_);
TensorToVector(dbias_, *ctx_, &h_dbias);
for (int i = 0; i < size_bias_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dbias[i] - base_dbias_vec_[i]) / base_dbias_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dbias[i] - base_dbias_vec_[i]), diff);
}
}
}
}
private:
int batch_size_, seq_len_, num_head_, dim_head_, dim_embed_;
int bsz_seq_, output_size_, input_size_;
bool has_bias_;
int size_src_, size_weight_, size_bias_, size_output_;
framework::Tensor src_, weight_, bias_, out_, bias_out_;
framework::Tensor dinput_, dweight_, dbias_, doutput_;
std::vector<T> src_vec_, weight_vec_, bias_vec_, out_vec_, bias_out_vec_;
std::vector<T> dinput_vec_, dweight_vec_, dbias_vec_, doutput_vec_;
// results of baseline.
std::vector<T> base_out_vec_, base_bias_out_vec_;
std::vector<T> base_dinput_vec_, base_dweight_vec_, base_dbias_vec_;
platform::CUDAPlace place_;
platform::CUDADeviceContext *ctx_;
};
// test for fp32, fp16, fp32+bias and fp16+bias
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-5));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-3));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-2));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-2), true);
}
| 99206304004d9b3beed891876dba4e16f51ad374.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/fused/attn_feed_forward.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/float16.h"
namespace framework = paddle::framework;
namespace platform = paddle::platform;
USE_OP(matmul);
USE_OP(elementwise_add);
// get paddle matmul op results as baseline
template <typename T>
void GetLinearOp(const std::vector<T> &x, const std::vector<T> &y,
const framework::DDim &x_dim, const framework::DDim &y_dim,
const platform::CUDADeviceContext &ctx, bool transpose_a,
bool transpose_b, float alpha, std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_out->Resize({x_dim[0], x_dim[1], y_dim[0]});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(framework::product(x_dim));
auto size_y = static_cast<size_t>(framework::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
cudaMemcpy(x_ptr, x.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(y_ptr, y.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
auto op = framework::OpRegistry::CreateOp(
"matmul", {{"X", {"X"}}, {"Y", {"Y"}}}, {{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
cudaMemcpy(out->data(), z_ptr, size_z * sizeof(T), cudaMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add op results as baseline
template <typename T>
void GetElementwiseAddOp(const std::vector<T> &x, const std::vector<T> &y,
const int bsz_seq, const int output_size,
const platform::CUDADeviceContext &ctx,
std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_out->Resize({bsz_seq, output_size});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = bsz_seq * output_size;
auto size_y = output_size;
auto size_z = bsz_seq * output_size;
cudaMemcpy(x_ptr, x.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(y_ptr, y.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
framework::AttributeMap attrs;
auto op = framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"X"}}, {"Y", {"Y"}}},
{{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
cudaMemcpy(out->data(), z_ptr, size_z * sizeof(T), cudaMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle matmul_grad op results as baseline
template <typename T>
void GetLinearOpGrad(const std::vector<T> &x_vec, const std::vector<T> &y_vec,
const std::vector<T> &dout_vec,
const framework::DDim &x_dim, const framework::DDim &y_dim,
const framework::DDim &out_dim,
const platform::CUDADeviceContext &ctx, bool transpose_a,
bool transpose_b, float alpha, std::vector<T> *dinput_vec,
std::vector<T> *dweight_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_dout->Resize(out_dim);
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize(x_dim);
tensor_dy->Resize(y_dim);
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto dinput_ptr = tensor_dx->mutable_data<T>(ctx.GetPlace());
auto dweight_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(framework::product(x_dim));
auto size_y = static_cast<size_t>(framework::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
cudaMemcpy(x_ptr, x_vec.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(y_ptr, y_vec.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
cudaMemcpyHostToDevice);
bool use_mkldnn = false;
std::vector<int> fused_reshape_X = {};
std::vector<int> fused_reshape_Y = {};
std::vector<int> fused_reshape_Out = {};
std::vector<int> fused_transpose_X = {};
std::vector<int> fused_transpose_Y = {};
std::vector<int> fused_transpose_Out = {};
bool use_quantizer = false, force_fp32_output = false;
std::string mkldnn_data_type = "float32";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"fused_reshape_X", fused_reshape_X});
attrs.insert({"fused_reshape_Y", fused_reshape_Y});
attrs.insert({"fused_reshape_Out", fused_reshape_Out});
attrs.insert({"fused_transpose_X", fused_transpose_X});
attrs.insert({"fused_transpose_Y", fused_transpose_Y});
attrs.insert({"fused_transpose_Out", fused_transpose_Out});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
attrs.insert({"force_fp32_output", force_fp32_output});
auto op = framework::OpRegistry::CreateOp(
"matmul_grad", {{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
op->Run(scope, ctx.GetPlace());
cudaMemcpy(dinput_vec->data(), dinput_ptr, size_x * sizeof(T),
cudaMemcpyDeviceToHost);
cudaMemcpy(dweight_vec->data(), dweight_ptr, size_y * sizeof(T),
cudaMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add_grad op results as baseline
template <typename T>
void GetElementwiseAddOpGrad(const std::vector<T> &dout_vec, const int bsz_seq,
const int output_size,
const platform::CUDADeviceContext &ctx,
std::vector<T> *dy_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_dout->Resize({bsz_seq, output_size});
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize({bsz_seq, output_size});
tensor_dy->Resize({output_size});
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto tensor_dy_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_z = static_cast<size_t>(bsz_seq * output_size);
cudaMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
cudaMemcpyHostToDevice);
int axis = -1;
bool use_mkldnn = false, use_quantizer = false;
std::string mkldnn_data_type = "float32";
std::string x_data_format = "", y_data_format = "";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"axis", axis});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"x_data_format", x_data_format});
attrs.insert({"y_data_format", y_data_format});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
auto op = framework::OpRegistry::CreateOp(
"elementwise_add_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
op->Run(scope, ctx.GetPlace());
auto size_y = static_cast<size_t>(output_size);
cudaMemcpy(dy_vec->data(), tensor_dy_ptr, size_y * sizeof(T),
cudaMemcpyDeviceToHost);
ctx.Wait();
}
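// Test harness: runs the fused FeedForward op (matmul + optional bias add) and
// compares its forward and backward results against the separate matmul and
// elementwise_add baseline ops defined above.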
template <typename T>
class TestFeedForward {
public:
TestFeedForward() {
batch_size_ = 16;
seq_len_ = 128;
num_head_ = 16;
dim_head_ = 64;
dim_embed_ = 1024;
has_bias_ = false;
}
TestFeedForward(int batch_size, int seq_len, int num_head, int dim_head,
int dim_embed, bool has_bias) {
batch_size_ = batch_size;
seq_len_ = seq_len;
num_head_ = num_head;
dim_head_ = dim_head;
dim_embed_ = dim_embed;
has_bias_ = has_bias;
}
~TestFeedForward() { delete ctx_; }
void SetUp() {
bsz_seq_ = batch_size_ * seq_len_;
output_size_ = 3 * num_head_ * dim_head_;
input_size_ = dim_embed_;
ctx_ = new platform::CUDADeviceContext(place_);
size_src_ = bsz_seq_ * dim_embed_; // src: [bs, seq_len, em_dim]
size_weight_ = dim_embed_ * output_size_; // weight: [output_size, em_dim]
size_output_ =
bsz_seq_ * output_size_; // output: [bs, seq_len, output_size]
size_bias_ = output_size_;
base_out_vec_.resize(size_output_);
base_bias_out_vec_.resize(size_output_);
base_dinput_vec_.resize(size_src_);
base_dweight_vec_.resize(size_weight_);
base_dbias_vec_.resize(size_bias_);
src_vec_.resize(size_src_);
weight_vec_.resize(size_weight_);
bias_vec_.resize(size_bias_);
doutput_vec_.resize(size_output_);
std::default_random_engine random(time(NULL));
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < size_src_; i++) {
src_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_weight_; i++) {
weight_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_bias_; i++) {
bias_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_output_; i++) {
doutput_vec_[i] = static_cast<T>(dis(random));
}
framework::TensorFromVector<T>(src_vec_, *ctx_, &src_);
src_.Resize({batch_size_, seq_len_, dim_embed_});
framework::TensorFromVector<T>(weight_vec_, *ctx_, &weight_);
weight_.Resize({output_size_, dim_embed_});
out_.Resize({batch_size_, seq_len_, output_size_});
out_.mutable_data<T>(place_);
if (has_bias_) {
framework::TensorFromVector<T>(bias_vec_, *ctx_, &bias_);
bias_.Resize({output_size_});
bias_out_.Resize({batch_size_, seq_len_, output_size_});
bias_out_.mutable_data<T>(place_);
}
framework::TensorFromVector<T>(doutput_vec_, *ctx_, &doutput_);
doutput_.Resize({batch_size_, seq_len_, output_size_});
dinput_.Resize({batch_size_, seq_len_, dim_embed_});
dinput_.mutable_data<T>(place_);
dweight_.Resize({output_size_, dim_embed_});
dweight_.mutable_data<T>(place_);
if (has_bias_) {
dbias_.Resize({output_size_});
dbias_.mutable_data<T>(place_);
}
}
void BaselineForward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOp(src_vec_, weight_vec_, src_.dims(), weight_.dims(), *ctx_,
transpose_a, transpose_b, alpha, &base_out_vec_);
if (has_bias_) {
GetElementwiseAddOp(base_out_vec_, bias_vec_, bsz_seq_, output_size_,
*ctx_, &base_bias_out_vec_);
}
ctx_->Wait();
}
// get forward results of feedforward.
void FusedForward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_output = out_.data<T>();
T *p_bias = nullptr;
T *p_bias_output = nullptr;
if (has_bias_) {
p_bias = bias_.data<T>();
p_bias_output = bias_out_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeForward(p_weight, p_src, p_bias, p_output,
p_bias_output);
ctx_->Wait();
}
void BaselineBackward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOpGrad(src_vec_, weight_vec_, doutput_vec_, src_.dims(),
weight_.dims(), out_.dims(), *ctx_, transpose_a,
transpose_b, alpha, &base_dinput_vec_, &base_dweight_vec_);
if (has_bias_) {
GetElementwiseAddOpGrad(doutput_vec_, bsz_seq_, output_size_, *ctx_,
&base_dbias_vec_);
}
ctx_->Wait();
}
// get backward results of feedforward.
void FusedBackward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_doutput = doutput_.data<T>();
T *p_dinput = dinput_.data<T>();
T *p_dweight = dweight_.data<T>();
T *bias_ptr = nullptr;
if (has_bias_) {
bias_ptr = dbias_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeBackward(p_src, p_weight, p_doutput, p_dinput, p_dweight,
bias_ptr);
ctx_->Wait();
}
void Run() {
SetUp();
BaselineForward();
FusedForward();
BaselineBackward();
FusedBackward();
}
// check forward correctness between baseline and results of feedforward.
void CheckOut(const T diff, bool is_relative_atol = false) {
std::vector<T> out(size_output_);
std::vector<T> bias_out(size_output_);
TensorToVector(out_, *ctx_, &out);
if (has_bias_) {
TensorToVector(bias_out_, *ctx_, &bias_out);
}
ctx_->Wait();
for (int i = 0; i < size_output_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((out[i] - base_out_vec_[i]) / base_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(out[i] - base_out_vec_[i]), diff);
}
if (has_bias_) {
if (is_relative_atol) {
EXPECT_LT(std::abs((bias_out[i] - base_bias_out_vec_[i]) /
base_bias_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(bias_out[i] - base_bias_out_vec_[i]), diff);
}
}
}
}
// check backward correctness between baseline and results of feedforward.
void CheckGrad(const T diff, bool is_relative_atol = false) {
std::vector<T> h_dinput(size_src_);
TensorToVector(dinput_, *ctx_, &h_dinput);
for (int i = 0; i < size_src_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dinput[i] - base_dinput_vec_[i]) / base_dinput_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dinput[i] - base_dinput_vec_[i]), diff);
}
}
std::vector<T> h_dweight(size_weight_);
TensorToVector(dweight_, *ctx_, &h_dweight);
for (int i = 0; i < size_weight_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((h_dweight[i] - base_dweight_vec_[i]) /
base_dweight_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dweight[i] - base_dweight_vec_[i]), diff);
}
}
if (has_bias_) {
std::vector<T> h_dbias(size_bias_);
TensorToVector(dbias_, *ctx_, &h_dbias);
for (int i = 0; i < size_bias_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dbias[i] - base_dbias_vec_[i]) / base_dbias_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dbias[i] - base_dbias_vec_[i]), diff);
}
}
}
}
private:
int batch_size_, seq_len_, num_head_, dim_head_, dim_embed_;
int bsz_seq_, output_size_, input_size_;
bool has_bias_;
int size_src_, size_weight_, size_bias_, size_output_;
framework::Tensor src_, weight_, bias_, out_, bias_out_;
framework::Tensor dinput_, dweight_, dbias_, doutput_;
std::vector<T> src_vec_, weight_vec_, bias_vec_, out_vec_, bias_out_vec_;
std::vector<T> dinput_vec_, dweight_vec_, dbias_vec_, doutput_vec_;
// results of baseline.
std::vector<T> base_out_vec_, base_bias_out_vec_;
std::vector<T> base_dinput_vec_, base_dweight_vec_, base_dbias_vec_;
platform::CUDAPlace place_;
platform::CUDADeviceContext *ctx_;
};
// test for fp32, fp16, fp32+bias and fp16+bias
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-5));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-3));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-2));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-2), true);
}
|
036be76d0855ef5bff62f5e51ecda12ff03f2c8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "magma_manager.h"
#include "../../cuda_err_check.h"
magma_manager::magma_manager() :
n_(-1),
num_batches_(-1),
factored_(false)
{
magma_init();
int device_id;
magma_getdevice(&device_id);
magma_queue_create(device_id, &magma_queue_);
}
magma_manager::~magma_manager()
{
if(factored_) {
FreeDeviceMemory();
}
magma_queue_destroy(magma_queue_);
magma_finalize();
}
void magma_manager::setup_memory()
{
if(factored_) {
FreeDeviceMemory();
}
AllocateDeviceMemory();
}
void magma_manager::AllocateDeviceMemory()
{
hipDeviceSynchronize();
cuda_err_check(hipGetLastError());
cuda_err_check(hipMalloc((void**)&matrix_inverse_dev_,sizeof(double)*(n_*n_*num_batches_)));
cuda_err_check(hipMalloc((void**)&matrix_inverse_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(hipMalloc((void**)&matrix_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(hipMalloc((void**)&info_dev_,sizeof(int)*num_batches_));
cuda_err_check(hipMalloc((void**)&ipiv_dev_,sizeof(int)*n_*num_batches_));
cuda_err_check(hipMalloc((void**)&tmp_dev_,sizeof(double)*num_batches_*n_));
cuda_err_check(hipMalloc((void**)&tmp_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(hipMalloc((void**)&ipiv_pointers_dev_,sizeof(int*)*num_batches_));
data_ptrs_.resize(num_batches_);
std::vector<int*> tmpi_ptrs(num_batches_);
std::vector<double*> tmp_ptrs(num_batches_);
for(int j = 0; j < num_batches_; ++j) {
data_ptrs_[j] = matrix_inverse_dev_ + j*n_*n_;
}
hipMemcpy(matrix_inverse_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
cuda_err_check(hipGetLastError());
for(int j = 0; j < num_batches_; ++j) {
tmp_ptrs[j] = tmp_dev_ + j*n_;
}
hipMemcpy(tmp_pointers_dev_, tmp_ptrs.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
cuda_err_check(hipGetLastError());
for(int j = 0; j < num_batches_; ++j) {
tmpi_ptrs[j] = ipiv_dev_ + j*n_;
}
hipMemcpy(ipiv_pointers_dev_, tmpi_ptrs.data(), sizeof(int*)*num_batches_, hipMemcpyHostToDevice);
cuda_err_check(hipGetLastError());
}
void magma_manager::FreeDeviceMemory()
{
hipFree(matrix_inverse_dev_);
hipFree(matrix_inverse_pointers_dev_);
hipFree(matrix_pointers_dev_);
hipFree(info_dev_);
hipFree(ipiv_dev_);
hipFree(ipiv_pointers_dev_);
hipFree(tmp_dev_);
hipFree(tmp_pointers_dev_);
}
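// LU-factor each n x n block with MAGMA and form its explicit inverse so that
// later solves reduce to a batched block-diagonal matrix-vector product.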
int magma_manager::factor_invert(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
hipMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
}
magma_dgetrf_batched(n_, /* number of rows per block */
n_, /* number of columns per block */
matrix_pointers_dev_,
n_, /* leading dimension of each block */
ipiv_pointers_dev_,
info_dev_,
num_batches_,
magma_queue_);
magma_dgetri_outofplace_batched(n_, /* order of block */
matrix_pointers_dev_,
n_, /* leading dimension of each block */
ipiv_pointers_dev_,
matrix_inverse_pointers_dev_,
n_, /* leading dimension of each block of inverse */
info_dev_,
num_batches_,
magma_queue_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(hipMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), hipMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
int magma_manager::factor_lu(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
hipMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
}
magma_dgetrf_batched(n_, /* number of rows per block */
n_, /* number of columns per block */
matrix_pointers_dev_,
n_, /* leading dimension of each block */
ipiv_pointers_dev_,
info_dev_,
num_batches_,
magma_queue_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(hipMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), hipMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
//The following modified from cuda sdk-5.0
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
static __global__ void MAGMA_MANAGER_TransposeNoBankConflicts(double *odata, const double *idata, const int width, const int height)
{
__shared__ double tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
int xIndex,yIndex,index_in,index_out;
xIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
index_in = xIndex + (yIndex)*width;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(xIndex < width && yIndex+i < height){
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];}
}
__syncthreads();
xIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_out = xIndex + (yIndex)*height;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(yIndex+i < width && xIndex < height){
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];}
}
}
void magma_manager::cuda_transpose(double* odata, const double* idata, const int width, const int height)
{
// Put df/dy in "normal" order
dim3 nBlocks2D,nThreads2D;
nThreads2D.x = TRANSPOSE_TILE_DIM;
nThreads2D.y = TRANSPOSE_BLOCK_ROWS;
nBlocks2D.x = (width+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
nBlocks2D.y = (height+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
hipLaunchKernelGGL(( MAGMA_MANAGER_TransposeNoBankConflicts), dim3(nBlocks2D),dim3(nThreads2D), 0, 0, odata,idata,width,height);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check( hipPeekAtLastError() );
cuda_err_check( hipDeviceSynchronize() );
#endif
}
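// Batched block-diagonal mat-vec: each thread computes one row of
// y_block = A_block * x_block for its assigned block.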
static void __global__ MAGMA_MANAGER_cuda_bdmv_kernel
(
const int mtx_block_size,
const int num_mtx_blocks,
const double* A_dev,
const double* X_dev ,
double * Y_dev
)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int stride = gridDim.x*blockDim.x;
for( ; tidx < num_mtx_blocks*mtx_block_size; tidx += stride)
{
int local_row = tidx % mtx_block_size;
int local_block = tidx / mtx_block_size;
double Y_dev_accum = 0.0;
for(int i = 0; i < mtx_block_size; ++i) //columns
{
int data_idx = mtx_block_size*mtx_block_size*local_block + mtx_block_size*i + local_row;
Y_dev_accum += A_dev[data_idx]*X_dev[i+local_block*mtx_block_size];
}
Y_dev[local_row+local_block*mtx_block_size] = Y_dev_accum;
}
}
int magma_manager::cuda_bdmv(int n, int nbatch, double* A_dev, double* B_dev, double* Y_dev)
{
  int threads = std::min(n*nbatch,1024);
int blocks=(nbatch*n+threads-1)/threads;
hipLaunchKernelGGL(( MAGMA_MANAGER_cuda_bdmv_kernel), dim3(blocks),dim3(threads), 0, 0, n, nbatch, A_dev, B_dev, Y_dev);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check(hipPeekAtLastError());
cuda_err_check(hipDeviceSynchronize());
#endif
return 0;
}
int magma_manager::solve_invert(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into soln
cuda_transpose(soln,rhs,num_batches_,n_);
// Block-diagonal matrix vector multiplication
cuda_bdmv(n_, num_batches_, matrix_inverse_dev_, soln, tmp_dev_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
int magma_manager::solve_lu(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into tmp_dev_
cuda_transpose(tmp_dev_,rhs,num_batches_,n_);
// Magma forward and back substitution
magma_dgetrs_batched(MagmaNoTrans,
n_, /* order of the matrix */
1, /* number of right hand sides */
matrix_pointers_dev_,
n_, /* leading dimension of A */
ipiv_pointers_dev_,
tmp_pointers_dev_, /* right hand side (input), solution (output) */
n_, /* leading dimension of b */
num_batches_,
magma_queue_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
| 036be76d0855ef5bff62f5e51ecda12ff03f2c8c.cu | #include "magma_manager.h"
#include "../../cuda_err_check.h"
magma_manager::magma_manager() :
n_(-1),
num_batches_(-1),
factored_(false)
{
magma_init();
int device_id;
magma_getdevice(&device_id);
magma_queue_create(device_id, &magma_queue_);
}
magma_manager::~magma_manager()
{
if(factored_) {
FreeDeviceMemory();
}
magma_queue_destroy(magma_queue_);
magma_finalize();
}
void magma_manager::setup_memory()
{
if(factored_) {
FreeDeviceMemory();
}
AllocateDeviceMemory();
}
void magma_manager::AllocateDeviceMemory()
{
cudaDeviceSynchronize();
cuda_err_check(cudaGetLastError());
cuda_err_check(cudaMalloc((void**)&matrix_inverse_dev_,sizeof(double)*(n_*n_*num_batches_)));
cuda_err_check(cudaMalloc((void**)&matrix_inverse_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(cudaMalloc((void**)&matrix_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(cudaMalloc((void**)&info_dev_,sizeof(int)*num_batches_));
cuda_err_check(cudaMalloc((void**)&ipiv_dev_,sizeof(int)*n_*num_batches_));
cuda_err_check(cudaMalloc((void**)&tmp_dev_,sizeof(double)*num_batches_*n_));
cuda_err_check(cudaMalloc((void**)&tmp_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(cudaMalloc((void**)&ipiv_pointers_dev_,sizeof(int*)*num_batches_));
data_ptrs_.resize(num_batches_);
std::vector<int*> tmpi_ptrs(num_batches_);
std::vector<double*> tmp_ptrs(num_batches_);
for(int j = 0; j < num_batches_; ++j) {
data_ptrs_[j] = matrix_inverse_dev_ + j*n_*n_;
}
cudaMemcpy(matrix_inverse_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
cuda_err_check(cudaGetLastError());
for(int j = 0; j < num_batches_; ++j) {
tmp_ptrs[j] = tmp_dev_ + j*n_;
}
cudaMemcpy(tmp_pointers_dev_, tmp_ptrs.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
cuda_err_check(cudaGetLastError());
for(int j = 0; j < num_batches_; ++j) {
tmpi_ptrs[j] = ipiv_dev_ + j*n_;
}
cudaMemcpy(ipiv_pointers_dev_, tmpi_ptrs.data(), sizeof(int*)*num_batches_, cudaMemcpyHostToDevice);
cuda_err_check(cudaGetLastError());
}
void magma_manager::FreeDeviceMemory()
{
cudaFree(matrix_inverse_dev_);
cudaFree(matrix_inverse_pointers_dev_);
cudaFree(matrix_pointers_dev_);
cudaFree(info_dev_);
cudaFree(ipiv_dev_);
cudaFree(ipiv_pointers_dev_);
cudaFree(tmp_dev_);
cudaFree(tmp_pointers_dev_);
}
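// LU-factor each n x n block with MAGMA and form its explicit inverse so that
// later solves reduce to a batched block-diagonal matrix-vector product.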
int magma_manager::factor_invert(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
cudaMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
}
magma_dgetrf_batched(n_, /* number of rows per block */
n_, /* number of columns per block */
matrix_pointers_dev_,
n_, /* leading dimension of each block */
ipiv_pointers_dev_,
info_dev_,
num_batches_,
magma_queue_);
magma_dgetri_outofplace_batched(n_, /* order of block */
matrix_pointers_dev_,
n_, /* leading dimension of each block */
ipiv_pointers_dev_,
matrix_inverse_pointers_dev_,
n_, /* leading dimension of each block of inverse */
info_dev_,
num_batches_,
magma_queue_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(cudaMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), cudaMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
int magma_manager::factor_lu(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
cudaMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
}
magma_dgetrf_batched(n_, /* number of rows per block */
n_, /* number of columns per block */
matrix_pointers_dev_,
n_, /* leading dimension of each block */
ipiv_pointers_dev_,
info_dev_,
num_batches_,
magma_queue_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(cudaMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), cudaMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
//The following modified from cuda sdk-5.0
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
static __global__ void MAGMA_MANAGER_TransposeNoBankConflicts(double *odata, const double *idata, const int width, const int height)
{
__shared__ double tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
int xIndex,yIndex,index_in,index_out;
xIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
index_in = xIndex + (yIndex)*width;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(xIndex < width && yIndex+i < height){
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];}
}
__syncthreads();
xIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_out = xIndex + (yIndex)*height;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(yIndex+i < width && xIndex < height){
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];}
}
}
void magma_manager::cuda_transpose(double* odata, const double* idata, const int width, const int height)
{
// Put df/dy in "normal" order
dim3 nBlocks2D,nThreads2D;
nThreads2D.x = TRANSPOSE_TILE_DIM;
nThreads2D.y = TRANSPOSE_BLOCK_ROWS;
nBlocks2D.x = (width+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
nBlocks2D.y = (height+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
MAGMA_MANAGER_TransposeNoBankConflicts<<<nBlocks2D,nThreads2D>>>(odata,idata,width,height);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check( cudaPeekAtLastError() );
cuda_err_check( cudaDeviceSynchronize() );
#endif
}
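// Batched block-diagonal mat-vec: each thread computes one row of
// y_block = A_block * x_block for its assigned block.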
static void __global__ MAGMA_MANAGER_cuda_bdmv_kernel
(
const int mtx_block_size,
const int num_mtx_blocks,
const double* A_dev,
const double* X_dev ,
double * Y_dev
)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int stride = gridDim.x*blockDim.x;
for( ; tidx < num_mtx_blocks*mtx_block_size; tidx += stride)
{
int local_row = tidx % mtx_block_size;
int local_block = tidx / mtx_block_size;
double Y_dev_accum = 0.0;
for(int i = 0; i < mtx_block_size; ++i) //columns
{
int data_idx = mtx_block_size*mtx_block_size*local_block + mtx_block_size*i + local_row;
Y_dev_accum += A_dev[data_idx]*X_dev[i+local_block*mtx_block_size];
}
Y_dev[local_row+local_block*mtx_block_size] = Y_dev_accum;
}
}
int magma_manager::cuda_bdmv(int n, int nbatch, double* A_dev, double* B_dev, double* Y_dev)
{
int threads = std::min(n*nbatch,1024);
int blocks=(nbatch*n+threads-1)/threads;
MAGMA_MANAGER_cuda_bdmv_kernel<<<blocks,threads>>>(n, nbatch, A_dev, B_dev, Y_dev);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check(cudaPeekAtLastError());
cuda_err_check(cudaDeviceSynchronize());
#endif
return 0;
}
int magma_manager::solve_invert(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into soln
cuda_transpose(soln,rhs,num_batches_,n_);
// Block-diagonal matrix vector multiplication
cuda_bdmv(n_, num_batches_, matrix_inverse_dev_, soln, tmp_dev_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
int magma_manager::solve_lu(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into tmp_dev_
cuda_transpose(tmp_dev_,rhs,num_batches_,n_);
// Magma forward and back substitution
magma_dgetrs_batched(MagmaNoTrans,
n_, /* order of the matrix */
1, /* number of right hand sides */
matrix_pointers_dev_,
n_, /* leading dimension of A */
ipiv_pointers_dev_,
tmp_pointers_dev_, /* right hand side (input), solution (output) */
n_, /* leading dimension of b */
num_batches_,
magma_queue_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
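// Typical call sequence (a sketch only; the construction below is hypothetical and
// 'values_dev', 'rhs_dev' and 'soln_dev' are assumed to be device arrays laid out
// as num_batches contiguous n-by-n blocks / n-vectors):
//
//   magma_manager mgr;                              // hypothetical default construction
//   mgr.factor_lu(num_batches, n, values_dev);      // batched LU of all blocks
//   mgr.solve_lu(num_batches, n, rhs_dev, soln_dev); // per-block forward/back substitution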
|
026b1386a346b9fc411eff4ac259c1efb0bcc0ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel3_zdir;
int xdim0_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim0_advec_cell_kernel3_zdir;
int ydim0_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim1_advec_cell_kernel3_zdir;
int xdim1_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim1_advec_cell_kernel3_zdir;
int ydim1_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim2_advec_cell_kernel3_zdir;
int xdim2_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim2_advec_cell_kernel3_zdir;
int ydim2_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim3_advec_cell_kernel3_zdir;
int xdim3_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim3_advec_cell_kernel3_zdir;
int ydim3_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim4_advec_cell_kernel3_zdir;
int xdim4_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim4_advec_cell_kernel3_zdir;
int ydim4_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim5_advec_cell_kernel3_zdir;
int xdim5_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim5_advec_cell_kernel3_zdir;
int ydim5_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim6_advec_cell_kernel3_zdir;
int xdim6_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim6_advec_cell_kernel3_zdir;
int ydim6_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim7_advec_cell_kernel3_zdir;
int xdim7_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim7_advec_cell_kernel3_zdir;
int ydim7_advec_cell_kernel3_zdir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_cell_kernel3_zdir * (y) + \
xdim0_advec_cell_kernel3_zdir * ydim0_advec_cell_kernel3_zdir * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_cell_kernel3_zdir * (y) + \
xdim1_advec_cell_kernel3_zdir * ydim1_advec_cell_kernel3_zdir * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_cell_kernel3_zdir * (y) + \
xdim2_advec_cell_kernel3_zdir * ydim2_advec_cell_kernel3_zdir * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_cell_kernel3_zdir * (y) + \
xdim3_advec_cell_kernel3_zdir * ydim3_advec_cell_kernel3_zdir * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_cell_kernel3_zdir * (y) + \
xdim4_advec_cell_kernel3_zdir * ydim4_advec_cell_kernel3_zdir * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_advec_cell_kernel3_zdir * (y) + \
xdim5_advec_cell_kernel3_zdir * ydim5_advec_cell_kernel3_zdir * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_advec_cell_kernel3_zdir * (y) + \
xdim6_advec_cell_kernel3_zdir * ydim6_advec_cell_kernel3_zdir * (z))
#define OPS_ACC7(x, y, z) \
(x + xdim7_advec_cell_kernel3_zdir * (y) + \
xdim7_advec_cell_kernel3_zdir * ydim7_advec_cell_kernel3_zdir * (z))
// user function
__device__
inline void
advec_cell_kernel3_zdir_gpu(const double *vol_flux_z, const double *pre_vol,
const int *zz, const double *vertexdz,
const double *density1, const double *energy1,
double *mass_flux_z, double *ener_flux) {
double sigmat, sigmav, sigmam, sigma3, sigma4;
double diffuw, diffdw, limiter;
double one_by_six = 1.0 / 6.0;
int z_max = field.z_max;
int upwind, donor, downwind, dif;
if (vol_flux_z[OPS_ACC0(0, 0, 0)] > 0.0) {
upwind = -2;
donor = -1;
downwind = 0;
dif = donor;
} else if (zz[OPS_ACC2(0, 0, 1)] < z_max + 2 - 2) {
upwind = 1;
donor = 0;
downwind = -1;
dif = upwind;
} else {
upwind = 0;
donor = 0;
downwind = -1;
dif = upwind;
}
sigmat = fabs(vol_flux_z[OPS_ACC0(0, 0, 0)]) / pre_vol[OPS_ACC1(0, 0, donor)];
sigma3 = (1.0 + sigmat) *
(vertexdz[OPS_ACC3(0, 0, 0)] / vertexdz[OPS_ACC3(0, 0, dif)]);
sigma4 = 2.0 - sigmat;
sigmav = sigmat;
diffuw = density1[OPS_ACC4(0, 0, donor)] - density1[OPS_ACC4(0, 0, upwind)];
diffdw = density1[OPS_ACC4(0, 0, downwind)] - density1[OPS_ACC4(0, 0, donor)];
if ((diffuw * diffdw) > 0.0)
limiter = (1.0 - sigmav) * SIGN(1.0, diffdw) *
MIN(MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter = 0.0;
mass_flux_z[OPS_ACC6(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 0, 0)] *
(density1[OPS_ACC4(0, 0, donor)] + limiter);
sigmam = fabs(mass_flux_z[OPS_ACC6(0, 0, 0)]) /
(density1[OPS_ACC4(0, 0, donor)] * pre_vol[OPS_ACC1(0, 0, donor)]);
diffuw = energy1[OPS_ACC5(0, 0, donor)] - energy1[OPS_ACC5(0, 0, upwind)];
diffdw = energy1[OPS_ACC5(0, 0, downwind)] - energy1[OPS_ACC5(0, 0, donor)];
if ((diffuw * diffdw) > 0.0)
limiter = (1.0 - sigmam) * SIGN(1.0, diffdw) *
MIN(MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter = 0.0;
ener_flux[OPS_ACC7(0, 0, 0)] = mass_flux_z[OPS_ACC6(0, 0, 0)] *
(energy1[OPS_ACC5(0, 0, donor)] + limiter);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
__global__ void ops_advec_cell_kernel3_zdir(
const double *__restrict arg0, const double *__restrict arg1,
const int *__restrict arg2, const double *__restrict arg3,
const double *__restrict arg4, const double *__restrict arg5,
double *__restrict arg6, double *__restrict arg7, int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim0_advec_cell_kernel3_zdir *
ydim0_advec_cell_kernel3_zdir;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim1_advec_cell_kernel3_zdir *
ydim1_advec_cell_kernel3_zdir;
arg2 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim2_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim2_advec_cell_kernel3_zdir *
ydim2_advec_cell_kernel3_zdir;
arg3 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim3_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim3_advec_cell_kernel3_zdir *
ydim3_advec_cell_kernel3_zdir;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim4_advec_cell_kernel3_zdir *
ydim4_advec_cell_kernel3_zdir;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim5_advec_cell_kernel3_zdir *
ydim5_advec_cell_kernel3_zdir;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim6_advec_cell_kernel3_zdir *
ydim6_advec_cell_kernel3_zdir;
arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim7_advec_cell_kernel3_zdir *
ydim7_advec_cell_kernel3_zdir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel3_zdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel3_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
#else
void ops_par_loop_advec_cell_kernel3_zdir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 8, range, 118))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(118, "advec_cell_kernel3_zdir");
OPS_kernels[118].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel3_zdir_h ||
ydim0 != ydim0_advec_cell_kernel3_zdir_h ||
xdim1 != xdim1_advec_cell_kernel3_zdir_h ||
ydim1 != ydim1_advec_cell_kernel3_zdir_h ||
xdim2 != xdim2_advec_cell_kernel3_zdir_h ||
ydim2 != ydim2_advec_cell_kernel3_zdir_h ||
xdim3 != xdim3_advec_cell_kernel3_zdir_h ||
ydim3 != ydim3_advec_cell_kernel3_zdir_h ||
xdim4 != xdim4_advec_cell_kernel3_zdir_h ||
ydim4 != ydim4_advec_cell_kernel3_zdir_h ||
xdim5 != xdim5_advec_cell_kernel3_zdir_h ||
ydim5 != ydim5_advec_cell_kernel3_zdir_h ||
xdim6 != xdim6_advec_cell_kernel3_zdir_h ||
ydim6 != ydim6_advec_cell_kernel3_zdir_h ||
xdim7 != xdim7_advec_cell_kernel3_zdir_h ||
ydim7 != ydim7_advec_cell_kernel3_zdir_h) {
hipMemcpyToSymbol(xdim0_advec_cell_kernel3_zdir, &xdim0, sizeof(int));
xdim0_advec_cell_kernel3_zdir_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_cell_kernel3_zdir, &ydim0, sizeof(int));
ydim0_advec_cell_kernel3_zdir_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_cell_kernel3_zdir, &xdim1, sizeof(int));
xdim1_advec_cell_kernel3_zdir_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_cell_kernel3_zdir, &ydim1, sizeof(int));
ydim1_advec_cell_kernel3_zdir_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_cell_kernel3_zdir, &xdim2, sizeof(int));
xdim2_advec_cell_kernel3_zdir_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_cell_kernel3_zdir, &ydim2, sizeof(int));
ydim2_advec_cell_kernel3_zdir_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_cell_kernel3_zdir, &xdim3, sizeof(int));
xdim3_advec_cell_kernel3_zdir_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_cell_kernel3_zdir, &ydim3, sizeof(int));
ydim3_advec_cell_kernel3_zdir_h = ydim3;
hipMemcpyToSymbol(xdim4_advec_cell_kernel3_zdir, &xdim4, sizeof(int));
xdim4_advec_cell_kernel3_zdir_h = xdim4;
hipMemcpyToSymbol(ydim4_advec_cell_kernel3_zdir, &ydim4, sizeof(int));
ydim4_advec_cell_kernel3_zdir_h = ydim4;
hipMemcpyToSymbol(xdim5_advec_cell_kernel3_zdir, &xdim5, sizeof(int));
xdim5_advec_cell_kernel3_zdir_h = xdim5;
hipMemcpyToSymbol(ydim5_advec_cell_kernel3_zdir, &ydim5, sizeof(int));
ydim5_advec_cell_kernel3_zdir_h = ydim5;
hipMemcpyToSymbol(xdim6_advec_cell_kernel3_zdir, &xdim6, sizeof(int));
xdim6_advec_cell_kernel3_zdir_h = xdim6;
hipMemcpyToSymbol(ydim6_advec_cell_kernel3_zdir, &ydim6, sizeof(int));
ydim6_advec_cell_kernel3_zdir_h = ydim6;
hipMemcpyToSymbol(xdim7_advec_cell_kernel3_zdir, &xdim7, sizeof(int));
xdim7_advec_cell_kernel3_zdir_h = xdim7;
hipMemcpyToSymbol(ydim7_advec_cell_kernel3_zdir, &ydim7, sizeof(int));
ydim7_advec_cell_kernel3_zdir_h = ydim7;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
char *p_a[8];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7 +
dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]);
base7 = base7 +
dat7 * args[7].dat->size[0] * args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[118].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_cell_kernel3_zdir), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7],
x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[118].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[6], range);
ops_set_halo_dirtybit3(&args[7], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[118].mpi_time += t2 - t1;
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg7);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel3_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 118;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 118;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->function = ops_par_loop_advec_cell_kernel3_zdir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(118, "advec_cell_kernel3_zdir");
}
ops_enqueue_kernel(desc);
}
#endif
| 026b1386a346b9fc411eff4ac259c1efb0bcc0ca.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel3_zdir;
int xdim0_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim0_advec_cell_kernel3_zdir;
int ydim0_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim1_advec_cell_kernel3_zdir;
int xdim1_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim1_advec_cell_kernel3_zdir;
int ydim1_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim2_advec_cell_kernel3_zdir;
int xdim2_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim2_advec_cell_kernel3_zdir;
int ydim2_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim3_advec_cell_kernel3_zdir;
int xdim3_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim3_advec_cell_kernel3_zdir;
int ydim3_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim4_advec_cell_kernel3_zdir;
int xdim4_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim4_advec_cell_kernel3_zdir;
int ydim4_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim5_advec_cell_kernel3_zdir;
int xdim5_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim5_advec_cell_kernel3_zdir;
int ydim5_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim6_advec_cell_kernel3_zdir;
int xdim6_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim6_advec_cell_kernel3_zdir;
int ydim6_advec_cell_kernel3_zdir_h = -1;
__constant__ int xdim7_advec_cell_kernel3_zdir;
int xdim7_advec_cell_kernel3_zdir_h = -1;
__constant__ int ydim7_advec_cell_kernel3_zdir;
int ydim7_advec_cell_kernel3_zdir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_cell_kernel3_zdir * (y) + \
xdim0_advec_cell_kernel3_zdir * ydim0_advec_cell_kernel3_zdir * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_cell_kernel3_zdir * (y) + \
xdim1_advec_cell_kernel3_zdir * ydim1_advec_cell_kernel3_zdir * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_cell_kernel3_zdir * (y) + \
xdim2_advec_cell_kernel3_zdir * ydim2_advec_cell_kernel3_zdir * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_cell_kernel3_zdir * (y) + \
xdim3_advec_cell_kernel3_zdir * ydim3_advec_cell_kernel3_zdir * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_cell_kernel3_zdir * (y) + \
xdim4_advec_cell_kernel3_zdir * ydim4_advec_cell_kernel3_zdir * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_advec_cell_kernel3_zdir * (y) + \
xdim5_advec_cell_kernel3_zdir * ydim5_advec_cell_kernel3_zdir * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_advec_cell_kernel3_zdir * (y) + \
xdim6_advec_cell_kernel3_zdir * ydim6_advec_cell_kernel3_zdir * (z))
#define OPS_ACC7(x, y, z) \
(x + xdim7_advec_cell_kernel3_zdir * (y) + \
xdim7_advec_cell_kernel3_zdir * ydim7_advec_cell_kernel3_zdir * (z))
// user function
__device__
inline void
advec_cell_kernel3_zdir_gpu(const double *vol_flux_z, const double *pre_vol,
const int *zz, const double *vertexdz,
const double *density1, const double *energy1,
double *mass_flux_z, double *ener_flux) {
double sigmat, sigmav, sigmam, sigma3, sigma4;
double diffuw, diffdw, limiter;
double one_by_six = 1.0 / 6.0;
int z_max = field.z_max;
int upwind, donor, downwind, dif;
if (vol_flux_z[OPS_ACC0(0, 0, 0)] > 0.0) {
upwind = -2;
donor = -1;
downwind = 0;
dif = donor;
} else if (zz[OPS_ACC2(0, 0, 1)] < z_max + 2 - 2) {
upwind = 1;
donor = 0;
downwind = -1;
dif = upwind;
} else {
upwind = 0;
donor = 0;
downwind = -1;
dif = upwind;
}
sigmat = fabs(vol_flux_z[OPS_ACC0(0, 0, 0)]) / pre_vol[OPS_ACC1(0, 0, donor)];
sigma3 = (1.0 + sigmat) *
(vertexdz[OPS_ACC3(0, 0, 0)] / vertexdz[OPS_ACC3(0, 0, dif)]);
sigma4 = 2.0 - sigmat;
sigmav = sigmat;
diffuw = density1[OPS_ACC4(0, 0, donor)] - density1[OPS_ACC4(0, 0, upwind)];
diffdw = density1[OPS_ACC4(0, 0, downwind)] - density1[OPS_ACC4(0, 0, donor)];
if ((diffuw * diffdw) > 0.0)
limiter = (1.0 - sigmav) * SIGN(1.0, diffdw) *
MIN(MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter = 0.0;
mass_flux_z[OPS_ACC6(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 0, 0)] *
(density1[OPS_ACC4(0, 0, donor)] + limiter);
sigmam = fabs(mass_flux_z[OPS_ACC6(0, 0, 0)]) /
(density1[OPS_ACC4(0, 0, donor)] * pre_vol[OPS_ACC1(0, 0, donor)]);
diffuw = energy1[OPS_ACC5(0, 0, donor)] - energy1[OPS_ACC5(0, 0, upwind)];
diffdw = energy1[OPS_ACC5(0, 0, downwind)] - energy1[OPS_ACC5(0, 0, donor)];
if ((diffuw * diffdw) > 0.0)
limiter = (1.0 - sigmam) * SIGN(1.0, diffdw) *
MIN(MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter = 0.0;
ener_flux[OPS_ACC7(0, 0, 0)] = mass_flux_z[OPS_ACC6(0, 0, 0)] *
(energy1[OPS_ACC5(0, 0, donor)] + limiter);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
__global__ void ops_advec_cell_kernel3_zdir(
const double *__restrict arg0, const double *__restrict arg1,
const int *__restrict arg2, const double *__restrict arg3,
const double *__restrict arg4, const double *__restrict arg5,
double *__restrict arg6, double *__restrict arg7, int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim0_advec_cell_kernel3_zdir *
ydim0_advec_cell_kernel3_zdir;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim1_advec_cell_kernel3_zdir *
ydim1_advec_cell_kernel3_zdir;
arg2 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim2_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim2_advec_cell_kernel3_zdir *
ydim2_advec_cell_kernel3_zdir;
arg3 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim3_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim3_advec_cell_kernel3_zdir *
ydim3_advec_cell_kernel3_zdir;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim4_advec_cell_kernel3_zdir *
ydim4_advec_cell_kernel3_zdir;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim5_advec_cell_kernel3_zdir *
ydim5_advec_cell_kernel3_zdir;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim6_advec_cell_kernel3_zdir *
ydim6_advec_cell_kernel3_zdir;
arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel3_zdir +
idx_z * 1 * 1 * xdim7_advec_cell_kernel3_zdir *
ydim7_advec_cell_kernel3_zdir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel3_zdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel3_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
#else
void ops_par_loop_advec_cell_kernel3_zdir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 8, range, 118))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(118, "advec_cell_kernel3_zdir");
OPS_kernels[118].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel3_zdir_h ||
ydim0 != ydim0_advec_cell_kernel3_zdir_h ||
xdim1 != xdim1_advec_cell_kernel3_zdir_h ||
ydim1 != ydim1_advec_cell_kernel3_zdir_h ||
xdim2 != xdim2_advec_cell_kernel3_zdir_h ||
ydim2 != ydim2_advec_cell_kernel3_zdir_h ||
xdim3 != xdim3_advec_cell_kernel3_zdir_h ||
ydim3 != ydim3_advec_cell_kernel3_zdir_h ||
xdim4 != xdim4_advec_cell_kernel3_zdir_h ||
ydim4 != ydim4_advec_cell_kernel3_zdir_h ||
xdim5 != xdim5_advec_cell_kernel3_zdir_h ||
ydim5 != ydim5_advec_cell_kernel3_zdir_h ||
xdim6 != xdim6_advec_cell_kernel3_zdir_h ||
ydim6 != ydim6_advec_cell_kernel3_zdir_h ||
xdim7 != xdim7_advec_cell_kernel3_zdir_h ||
ydim7 != ydim7_advec_cell_kernel3_zdir_h) {
cudaMemcpyToSymbol(xdim0_advec_cell_kernel3_zdir, &xdim0, sizeof(int));
xdim0_advec_cell_kernel3_zdir_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_cell_kernel3_zdir, &ydim0, sizeof(int));
ydim0_advec_cell_kernel3_zdir_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_cell_kernel3_zdir, &xdim1, sizeof(int));
xdim1_advec_cell_kernel3_zdir_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_cell_kernel3_zdir, &ydim1, sizeof(int));
ydim1_advec_cell_kernel3_zdir_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_cell_kernel3_zdir, &xdim2, sizeof(int));
xdim2_advec_cell_kernel3_zdir_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_cell_kernel3_zdir, &ydim2, sizeof(int));
ydim2_advec_cell_kernel3_zdir_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_cell_kernel3_zdir, &xdim3, sizeof(int));
xdim3_advec_cell_kernel3_zdir_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_cell_kernel3_zdir, &ydim3, sizeof(int));
ydim3_advec_cell_kernel3_zdir_h = ydim3;
cudaMemcpyToSymbol(xdim4_advec_cell_kernel3_zdir, &xdim4, sizeof(int));
xdim4_advec_cell_kernel3_zdir_h = xdim4;
cudaMemcpyToSymbol(ydim4_advec_cell_kernel3_zdir, &ydim4, sizeof(int));
ydim4_advec_cell_kernel3_zdir_h = ydim4;
cudaMemcpyToSymbol(xdim5_advec_cell_kernel3_zdir, &xdim5, sizeof(int));
xdim5_advec_cell_kernel3_zdir_h = xdim5;
cudaMemcpyToSymbol(ydim5_advec_cell_kernel3_zdir, &ydim5, sizeof(int));
ydim5_advec_cell_kernel3_zdir_h = ydim5;
cudaMemcpyToSymbol(xdim6_advec_cell_kernel3_zdir, &xdim6, sizeof(int));
xdim6_advec_cell_kernel3_zdir_h = xdim6;
cudaMemcpyToSymbol(ydim6_advec_cell_kernel3_zdir, &ydim6, sizeof(int));
ydim6_advec_cell_kernel3_zdir_h = ydim6;
cudaMemcpyToSymbol(xdim7_advec_cell_kernel3_zdir, &xdim7, sizeof(int));
xdim7_advec_cell_kernel3_zdir_h = xdim7;
cudaMemcpyToSymbol(ydim7_advec_cell_kernel3_zdir, &ydim7, sizeof(int));
ydim7_advec_cell_kernel3_zdir_h = ydim7;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
char *p_a[8];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7 +
dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]);
base7 = base7 +
dat7 * args[7].dat->size[0] * args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[118].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_cell_kernel3_zdir<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7],
x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[118].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[6], range);
ops_set_halo_dirtybit3(&args[7], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[118].mpi_time += t2 - t1;
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg7);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel3_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 118;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 118;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->function = ops_par_loop_advec_cell_kernel3_zdir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(118, "advec_cell_kernel3_zdir");
}
ops_enqueue_kernel(desc);
}
#endif
|
3cb0c04f1b3634fd1b258b87f5263a71a51851d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
/*
* This example demonstrates submitting work to a CUDA stream in depth-first
* order. Work submission in depth-first order may introduce false-dependencies
* between unrelated tasks in different CUDA streams, limiting the parallelism
* of a CUDA application. kernel_1, kernel_2, kernel_3, and kernel_4 simply
* implement identical, dummy computation. Separate kernels are used to make the
* scheduling of these kernels simpler to visualize in the Visual Profiler.
*/
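/*
 * For contrast, a hypothetical breadth-first submission (not used in this example)
 * would interleave the kernels across the streams, e.g.
 *
 *   for (int i = 0; i < n_streams; i++)
 *       hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
 *   for (int i = 0; i < n_streams; i++)
 *       hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
 *   // ...and likewise for kernel_3 and kernel_4...
 *
 * which avoids the false dependencies described above on hardware without
 * multiple hardware work queues (HyperQ).
 */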
#define N 300000
#define NSTREAM 4
__global__ void kernel_1()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int main(int argc, char **argv)
{
int n_streams = NSTREAM;
int isize = 1;
int iblock = 1;
int bigcase = 0;
// get argument from command line
if (argc > 1) n_streams = atoi(argv[1]);
if (argc > 2) bigcase = atoi(argv[2]);
float elapsed_time;
    // set up max connections
char* iname = "CUDA_DEVICE_MAX_CONNECTIONS";
setenv (iname, "32", 1);
char *ivalue = getenv (iname);
printf ("%s = %s\n", iname, ivalue);
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name,
n_streams);
CHECK(hipSetDevice(dev));
// check if device support hyper-q
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 "
"or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// Allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof(
hipStream_t));
for (int i = 0 ; i < n_streams ; i++)
{
CHECK(hipStreamCreate(&(streams[i])));
}
// run kernel with more threads
if (bigcase == 1)
{
iblock = 512;
isize = 1 << 12;
}
// set up execution configuration
dim3 block (iblock);
dim3 grid (isize / iblock);
printf("> grid %d block %d\n", grid.x, block.x);
    // create events
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
// record start event
CHECK(hipEventRecord(start, 0));
// dispatch job with depth first ordering
for (int i = 0; i < n_streams; i++)
{
hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], );
}
// record stop event
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
// calculate elapsed time
CHECK(hipEventElapsedTime(&elapsed_time, start, stop));
printf("Measured time for parallel execution = %.3fs\n",
elapsed_time / 1000.0f);
    // release all streams
for (int i = 0 ; i < n_streams ; i++)
{
CHECK(hipStreamDestroy(streams[i]));
}
free(streams);
// destroy events
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
// reset device
CHECK(hipDeviceReset());
return 0;
}
| 3cb0c04f1b3634fd1b258b87f5263a71a51851d2.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
/*
* This example demonstrates submitting work to a CUDA stream in depth-first
* order. Work submission in depth-first order may introduce false-dependencies
* between unrelated tasks in different CUDA streams, limiting the parallelism
* of a CUDA application. kernel_1, kernel_2, kernel_3, and kernel_4 simply
* implement identical, dummy computation. Separate kernels are used to make the
* scheduling of these kernels simpler to visualize in the Visual Profiler.
*/
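/*
 * For contrast, a hypothetical breadth-first submission (not used in this example)
 * would interleave the kernels across the streams, e.g.
 *
 *   for (int i = 0; i < n_streams; i++) kernel_1<<<grid, block, 0, streams[i]>>>();
 *   for (int i = 0; i < n_streams; i++) kernel_2<<<grid, block, 0, streams[i]>>>();
 *   // ...and likewise for kernel_3 and kernel_4...
 *
 * which avoids the false dependencies described above on hardware without
 * multiple hardware work queues (HyperQ).
 */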
#define N 300000
#define NSTREAM 4
__global__ void kernel_1()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int main(int argc, char **argv)
{
int n_streams = NSTREAM;
int isize = 1;
int iblock = 1;
int bigcase = 0;
// get argument from command line
if (argc > 1) n_streams = atoi(argv[1]);
if (argc > 2) bigcase = atoi(argv[2]);
float elapsed_time;
    // set up max connections
char* iname = "CUDA_DEVICE_MAX_CONNECTIONS";
setenv (iname, "32", 1);
char *ivalue = getenv (iname);
printf ("%s = %s\n", iname, ivalue);
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name,
n_streams);
CHECK(cudaSetDevice(dev));
// check if device support hyper-q
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 "
"or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// Allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(
cudaStream_t));
for (int i = 0 ; i < n_streams ; i++)
{
CHECK(cudaStreamCreate(&(streams[i])));
}
// run kernel with more threads
if (bigcase == 1)
{
iblock = 512;
isize = 1 << 12;
}
// set up execution configuration
dim3 block (iblock);
dim3 grid (isize / iblock);
printf("> grid %d block %d\n", grid.x, block.x);
    // create events
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
// record start event
CHECK(cudaEventRecord(start, 0));
// dispatch job with depth first ordering
for (int i = 0; i < n_streams; i++)
{
kernel_1<<<grid, block, 0, streams[i]>>>();
kernel_2<<<grid, block, 0, streams[i]>>>();
kernel_3<<<grid, block, 0, streams[i]>>>();
kernel_4<<<grid, block, 0, streams[i]>>>();
}
// record stop event
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
// calculate elapsed time
CHECK(cudaEventElapsedTime(&elapsed_time, start, stop));
printf("Measured time for parallel execution = %.3fs\n",
elapsed_time / 1000.0f);
    // release all streams
for (int i = 0 ; i < n_streams ; i++)
{
CHECK(cudaStreamDestroy(streams[i]));
}
free(streams);
// destroy events
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
// reset device
CHECK(cudaDeviceReset());
return 0;
}
|
12bf3676c2104a80d2df2bc06366ea5533c29284.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
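//Example invocation (the added time of 30 days is hypothetical):
//  ./StarContinueRun.exe 30
//The single command-line argument is the number of days of simulated time to add
//to the existing run; it is converted to code units once RunParameters is read.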
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"
#include "../CommonCompileFiles/binaryStarCommonRunGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonRunFunctions.h"
//Time to add on to the run. Read in from the command line.
float ContinueRunTime;
void openAndReadFiles()
{
ifstream data;
string name;
	//Opening the positions and velocity file that is dumped to for making movies. Need to move to the end of the file.
PosAndVelFile = fopen("PosAndVel", "rb+");
if(PosAndVelFile == NULL)
{
printf("\n\n The PosAndVel file does not exist\n\n");
exit(0);
}
fseek(PosAndVelFile,0,SEEK_END);
//Reading in the run parameters
data.open("RunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> SystemLengthConverterToKilometers;
getline(data,name,'=');
data >> SystemMassConverterToKilograms;
getline(data,name,'=');
data >> SystemTimeConverterToSeconds;
getline(data,name,'=');
data >> NumberElementsStar1;
getline(data,name,'=');
data >> NumberElementsStar2;
getline(data,name,'=');
data >> CorePushBackReduction;
getline(data,name,'=');
data >> PlasmaPushBackReduction;
}
else
{
printf("\nTSU Error could not open RunParameters file\n");
exit(0);
}
data.close();
NumberElements = NumberElementsStar1 + NumberElementsStar2;
ContinueRunTime *=((24.0*60.0*60.0)/SystemTimeConverterToSeconds);
	//Reading in the branch run parameters
data.open("BranchRunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> RecordRate;
getline(data,name,'=');
data >> DrawRate;
}
else
{
printf("\nTSU Error could not open BranchRunParameters file\n");
exit(0);
}
data.close();
}
void readInTheInitialsStars()
{
FILE *startFile = fopen("FinalPosVelForce","rb");
if(startFile == NULL)
{
printf("\n\n The FinalPosVelForce file does not exist\n\n");
exit(0);
}
fread(&StartTime, sizeof(float), 1, startFile);
fread(PosCPU, sizeof(float4), NumberElements, startFile);
fread(VelCPU, sizeof(float4), NumberElements, startFile);
fread(ForceCPU, sizeof(float4), NumberElements, startFile);
fclose(startFile);
}
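//All-pairs force kernel: each thread owns one element and accumulates the force on
//it from every other element across all GPUs, staging positions, velocities and
//forces through shared memory one BLOCKSIZE tile at a time. The pairwise helpers
//(calculateCoreCoreForce, calculateCorePlasmaForce, calculatePlasmaPlasmaForce)
//are presumed to be defined in the common run includes above.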
__global__ void getForces(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int numberOfElements, float corePushBackReduction, float plasmaPushBackReduction, int gPUNumber, int gPUsUsed)
{
int id, ids, i, j, k;
float4 posMe, velMe, forceMe;
float4 partialForce;
double forceSumX, forceSumY, forceSumZ;
__shared__ float4 shPos[BLOCKSIZE];
__shared__ float4 shVel[BLOCKSIZE];
__shared__ float4 shForce[BLOCKSIZE];
id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
if(numberOfElements <= id)
{
printf("\n TSU error: id out of bounds in getForces. \n");
}
forceSumX = 0.0;
forceSumY = 0.0;
forceSumZ = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
velMe.x = vel[id].x;
velMe.y = vel[id].y;
velMe.z = vel[id].z;
velMe.w = vel[id].w;
forceMe.x = force[id].x;
forceMe.y = force[id].y;
forceMe.z = force[id].z;
forceMe.w = force[id].w;
for(k =0; k < gPUsUsed; k++)
{
for(j = 0; j < gridDim.x; j++)
{
shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
__syncthreads();
#pragma unroll 32
for(i = 0; i < blockDim.x; i++)
{
ids = i + blockDim.x*j + blockDim.x*gridDim.x*k;
if(id != ids)
{
if(id == 0 && ids == numberElementsStar1)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == numberElementsStar1 && ids == 0)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == 0 || id == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(ids == 0 || ids == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else
{
partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPushBackReduction);
}
forceSumX += partialForce.x;
forceSumY += partialForce.y;
forceSumZ += partialForce.z;
}
}
__syncthreads();
}
}
force[id].x = (float)forceSumX;
force[id].y = (float)forceSumY;
force[id].z = (float)forceSumZ;
}
__global__ void moveBodies(float4 *pos, float4 *vel, float4 *force, float dt, int gPUNumber)
{
int id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
vel[id].x += (force[id].x/pos[id].w)*dt;
vel[id].y += (force[id].y/pos[id].w)*dt;
vel[id].z += (force[id].z/pos[id].w)*dt;
pos[id].x += vel[id].x*dt;
pos[id].y += vel[id].y*dt;
pos[id].z += vel[id].z*dt;
}
float starNbody(float time, float runTime, float dt, int gPUsUsed)
{
int tDraw = 0;
int tRecord = 0;
int tBackup = 0;
int backupRate = 1000;
while(time < runTime)
{
int offSet = NumberElements/gPUsUsed;
//Getting forces
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
hipLaunchKernelGGL(( getForces), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, CorePushBackReduction, PlasmaPushBackReduction, i, gPUsUsed);
errorCheck("getForces");
}
//Moving elements
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
hipLaunchKernelGGL(( moveBodies), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], dt, i);
errorCheck("moveBodies");
}
hipDeviceSynchronize();
errorCheck("hipDeviceSynchronize");
//Sharing memory
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
for(int j = 0; j < gPUsUsed; j++)
{
if(i != j)
{
hipMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
errorCheck("hipMemcpy Pos");
hipMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
errorCheck("hipMemcpy Vel");
}
}
}
hipDeviceSynchronize();
errorCheck("hipDeviceSynchronize");
if(tDraw == DrawRate)
{
//Because it is shared above it will only need to be copied from one GPU.
hipSetDevice(0);
errorCheck("hipSetDevice");
hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Pos draw");
drawPicture();
tDraw = 0;
printf("\n Time in days = %f", time*SystemTimeConverterToSeconds/(60.0*60.0*24.0));
}
if(tRecord == RecordRate)
{
//Because it is shared above it will only need to be copied from one GPU.
hipSetDevice(0);
errorCheck("hipSetDevice");
hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Pos record");
hipMemcpy(VelCPU, VelGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Vel record");
recordPosAndVel(time);
tRecord = 0;
}
if(tBackup == backupRate)
{
//Because it is shared above it will only need to be copied from one GPU.
			//Saving the run's positions, velocities and forces in case the system crashes in the middle of a run
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
tBackup = 0;
}
tDraw++;
tRecord++;
tBackup++;
time += dt;
}
return(time - dt);
}
void control()
{
struct sigaction sa;
float time = StartTime;
clock_t startTimer, endTimer;
int gPUsUsed;
//Starting the timer.
startTimer = clock();
// Handling input from the screen.
sa.sa_handler = signalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
if (sigaction(SIGINT, &sa, NULL) == -1)
{
printf("\nTSU Error: sigaction error\n");
}
	// Reading in the run parameters.
printf("\n Reading and setting the run parameters.\n");
openAndReadFiles();
// Allocating memory for CPU and GPU.
printf("\n Allocating memory on the GPU and CPU and opening positions and velocities file.\n");
allocateCPUMemory();
	// Reading in the stars saved at the end of the previous run.
	printf("\n Reading in the stars saved at the end of the previous run.\n");
readInTheInitialsStars();
// Draw the intial configuration.
printf("\n Drawing initial picture.\n");
drawPicture();
	// Setting up the GPU.
printf("\n Setting up the GPU.\n");
gPUsUsed = deviceSetup();
// Running the simulation.
printf("\n Running the simulation.\n");
copyStarsUpToGPU(gPUsUsed);
time = starNbody(StartTime, StartTime + ContinueRunTime, DT, gPUsUsed);
	// Saving the run's final positions and velocities.
	printf("\n Saving the run's final positions and velocities.\n");
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
	// Saving any stats about the run that you may want. Nothing is recorded as of yet.
	printf("\n Saving any stats about the run that you may want.\n");
//recordStarStats();
// Freeing memory.
printf("\n Cleaning up the run.\n");
cleanUp(gPUsUsed);
fclose(PosAndVelFile);
// Stopping timer and printing out run time.
endTimer = clock();
int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
int hours = seconds/3600;
int minutes = (seconds - hours*3600)/60;
seconds = seconds - hours*3600 - minutes*60;
printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
printf("\n The run has finished successfully \n\n");
exit(0);
}
int main(int argc, char** argv)
{
if( argc < 2)
{
printf("\n You need to intire an amount of time to add to the run on the comand line\n");
exit(0);
}
else
{
		ContinueRunTime = atof(argv[1]); //Reading time in as days. It is converted to our units after the parameter file is read in.
}
//Globals for setting up the viewing window
int xWindowSize = 2500;
int yWindowSize = 2500;
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(xWindowSize,yWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("Creating Stars");
glutReshapeFunc(reshape);
init();
glShadeModel(GL_SMOOTH);
glClearColor(0.0, 0.0, 0.0, 0.0);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutIdleFunc(control);
glutMainLoop();
return 0;
}
| 12bf3676c2104a80d2df2bc06366ea5533c29284.cu | /*
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
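//Example invocation (the added time of 30 days is hypothetical):
//  ./StarContinueRun.exe 30
//The single command-line argument is the number of days of simulated time to add
//to the existing run; it is converted to code units once RunParameters is read.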
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"
#include "../CommonCompileFiles/binaryStarCommonRunGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonRunFunctions.h"
//Time to add on to the run. Read in from the command line.
float ContinueRunTime;
void openAndReadFiles()
{
ifstream data;
string name;
	//Opening the positions and velocity file that is dumped to for making movies. Need to move to the end of the file.
PosAndVelFile = fopen("PosAndVel", "rb+");
if(PosAndVelFile == NULL)
{
printf("\n\n The PosAndVel file does not exist\n\n");
exit(0);
}
fseek(PosAndVelFile,0,SEEK_END);
//Reading in the run parameters
data.open("RunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> SystemLengthConverterToKilometers;
getline(data,name,'=');
data >> SystemMassConverterToKilograms;
getline(data,name,'=');
data >> SystemTimeConverterToSeconds;
getline(data,name,'=');
data >> NumberElementsStar1;
getline(data,name,'=');
data >> NumberElementsStar2;
getline(data,name,'=');
data >> CorePushBackReduction;
getline(data,name,'=');
data >> PlasmaPushBackReduction;
}
else
{
printf("\nTSU Error could not open RunParameters file\n");
exit(0);
}
data.close();
NumberElements = NumberElementsStar1 + NumberElementsStar2;
ContinueRunTime *=((24.0*60.0*60.0)/SystemTimeConverterToSeconds);
//Reading in the run parameters
data.open("BranchRunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> RecordRate;
getline(data,name,'=');
data >> DrawRate;
}
else
{
printf("\nTSU Error could not open BranchRunParameters file\n");
exit(0);
}
data.close();
}
void readInTheInitialsStars()
{
FILE *startFile = fopen("FinalPosVelForce","rb");
if(startFile == NULL)
{
printf("\n\n The FinalPosVelForce file does not exist\n\n");
exit(0);
}
fread(&StartTime, sizeof(float), 1, startFile);
fread(PosCPU, sizeof(float4), NumberElements, startFile);
fread(VelCPU, sizeof(float4), NumberElements, startFile);
fread(ForceCPU, sizeof(float4), NumberElements, startFile);
fclose(startFile);
}
__global__ void getForces(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int numberOfElements, float corePushBackReduction, float plasmaPushBackReduction, int gPUNumber, int gPUsUsed)
{
int id, ids, i, j, k;
float4 posMe, velMe, forceMe;
float4 partialForce;
double forceSumX, forceSumY, forceSumZ;
__shared__ float4 shPos[BLOCKSIZE];
__shared__ float4 shVel[BLOCKSIZE];
__shared__ float4 shForce[BLOCKSIZE];
id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
if(numberOfElements <= id)
{
printf("\n TSU error: id out of bounds in getForces. \n");
}
forceSumX = 0.0;
forceSumY = 0.0;
forceSumZ = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
velMe.x = vel[id].x;
velMe.y = vel[id].y;
velMe.z = vel[id].z;
velMe.w = vel[id].w;
forceMe.x = force[id].x;
forceMe.y = force[id].y;
forceMe.z = force[id].z;
forceMe.w = force[id].w;
for(k =0; k < gPUsUsed; k++)
{
for(j = 0; j < gridDim.x; j++)
{
shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
__syncthreads();
#pragma unroll 32
for(i = 0; i < blockDim.x; i++)
{
ids = i + blockDim.x*j + blockDim.x*gridDim.x*k;
if(id != ids)
{
if(id == 0 && ids == numberElementsStar1)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == numberElementsStar1 && ids == 0)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == 0 || id == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(ids == 0 || ids == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else
{
partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPushBackReduction);
}
forceSumX += partialForce.x;
forceSumY += partialForce.y;
forceSumZ += partialForce.z;
}
}
__syncthreads();
}
}
force[id].x = (float)forceSumX;
force[id].y = (float)forceSumY;
force[id].z = (float)forceSumZ;
}
__global__ void moveBodies(float4 *pos, float4 *vel, float4 *force, float dt, int gPUNumber)
{
int id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
vel[id].x += (force[id].x/pos[id].w)*dt;
vel[id].y += (force[id].y/pos[id].w)*dt;
vel[id].z += (force[id].z/pos[id].w)*dt;
pos[id].x += vel[id].x*dt;
pos[id].y += vel[id].y*dt;
pos[id].z += vel[id].z*dt;
}
float starNbody(float time, float runTime, float dt, int gPUsUsed)
{
int tDraw = 0;
int tRecord = 0;
int tBackup = 0;
int backupRate = 1000;
while(time < runTime)
{
int offSet = NumberElements/gPUsUsed;
//Getting forces
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
getForces<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, CorePushBackReduction, PlasmaPushBackReduction, i, gPUsUsed);
errorCheck("getForces");
}
//Moving elements
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
moveBodies<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], dt, i);
errorCheck("moveBodies");
}
cudaDeviceSynchronize();
errorCheck("cudaDeviceSynchronize");
//Sharing memory
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
for(int j = 0; j < gPUsUsed; j++)
{
if(i != j)
{
cudaMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
errorCheck("cudaMemcpy Pos");
cudaMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
errorCheck("cudaMemcpy Vel");
}
}
}
cudaDeviceSynchronize();
errorCheck("cudaDeviceSynchronize");
if(tDraw == DrawRate)
{
//Because it is shared above it will only need to be copied from one GPU.
cudaSetDevice(0);
errorCheck("cudaSetDevice");
cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Pos draw");
drawPicture();
tDraw = 0;
printf("\n Time in days = %f", time*SystemTimeConverterToSeconds/(60.0*60.0*24.0));
}
if(tRecord == RecordRate)
{
//Because it is shared above it will only need to be copied from one GPU.
cudaSetDevice(0);
errorCheck("cudaSetDevice");
cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Pos record");
cudaMemcpy(VelCPU, VelGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Vel record");
recordPosAndVel(time);
tRecord = 0;
}
if(tBackup == backupRate)
{
//Because it is shared above it will only need to be copied from one GPU.
//Saving the run's positions, velocities and forces in case the system crashes in the middle of a run
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
tBackup = 0;
}
tDraw++;
tRecord++;
tBackup++;
time += dt;
}
return(time - dt);
}
void control()
{
struct sigaction sa;
float time = StartTime;
clock_t startTimer, endTimer;
int gPUsUsed;
//Starting the timer.
startTimer = clock();
// Handling input from the screen.
sa.sa_handler = signalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
if (sigaction(SIGINT, &sa, NULL) == -1)
{
printf("\nTSU Error: sigaction error\n");
}
// Reading in the build parameters.
printf("\n Reading and setting the run parameters.\n");
openAndReadFiles();
// Allocating memory for CPU and GPU.
printf("\n Allocating memory on the GPU and CPU and opening positions and velocities file.\n");
allocateCPUMemory();
// Reading in the raw stars generated by the build program.
printf("\n Reading in the stars that were generated in the build program.\n");
readInTheInitialsStars();
// Draw the initial configuration.
printf("\n Drawing initial picture.\n");
drawPicture();
// Setting up the GPU.
printf("\n Setting up the GPU.\n");
gPUsUsed = deviceSetup();
// Running the simulation.
printf("\n Running the simulation.\n");
copyStarsUpToGPU(gPUsUsed);
time = starNbody(StartTime, StartTime + ContinueRunTime, DT, gPUsUsed);
// Saving the run's final positions and velocities.
printf("\n Saving the run's final positions and velocities.\n");
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
// Saving any run statistics you may want. Nothing is recorded as of yet.
printf("\n Saving any run statistics you may want.\n");
//recordStarStats();
// Freeing memory.
printf("\n Cleaning up the run.\n");
cleanUp(gPUsUsed);
fclose(PosAndVelFile);
// Stopping timer and printing out run time.
endTimer = clock();
int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
int hours = seconds/3600;
int minutes = (seconds - hours*3600)/60;
seconds = seconds - hours*3600 - minutes*60;
printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
printf("\n The run has finished successfully \n\n");
exit(0);
}
int main(int argc, char** argv)
{
if( argc < 2)
{
printf("\n You need to enter an amount of time to add to the run on the command line\n");
exit(0);
}
else
{
ContinueRunTime = atof(argv[1]); //Reading time in as days. Need to put in our units after the parameter file is read in.
}
//Globals for setting up the viewing window
int xWindowSize = 2500;
int yWindowSize = 2500;
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(xWindowSize,yWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("Creating Stars");
glutReshapeFunc(reshape);
init();
glShadeModel(GL_SMOOTH);
glClearColor(0.0, 0.0, 0.0, 0.0);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutIdleFunc(control);
glutMainLoop();
return 0;
}
|
5edfb00f53f27166bcc8d7b0ec0ef48f1aa80987.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include "utils.h"
#define MAX_MSG_SIZE 1 * 1024 * 1024
#define UNROLL 8
__global__ void ping_pong(volatile int *data_d, volatile int *flag_d, volatile int *flag_d_local,
int len, int pe, int iter, int skip, int *hflag) {
long long int start, stop;
double usec, time;
int i, tid, peer;
peer = !pe;
tid = threadIdx.x;
for (i = 0; i < (iter + skip); i++) {
if (i == skip) start = clock64();
if (pe) {
nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1));
nvshmem_int_put_nbi((int *)data_d, (int *)data_d, len, peer);
nvshmem_fence();
nvshmemx_int_signal((int *)flag_d, i + 1, peer);
} else {
nvshmem_int_put_nbi((int *)data_d, (int *)data_d, len, peer);
nvshmem_fence();
nvshmemx_int_signal((int *)flag_d, i + 1, peer);
nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1));
}
}
stop = clock64();
nvshmem_quiet();
*hflag = 1;
if ((pe == 0) && !tid) {
time = (stop - start) / iter;
usec = time * 1000 / clockrate;
printf("%7lu \t %8.2f \n", len * sizeof(int), usec);
}
}
int main(int c, char *v[]) {
int mype, npes, size;
int *flag_d = NULL, *data_d = NULL, *flag_d_local = NULL;
hipStream_t stream;
int iter = 500;
int skip = 50;
int max_msg_size = MAX_MSG_SIZE;
init_wrapper(&c, &v);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
if (npes != 2) {
fprintf(stderr, "This test requires exactly two processes \n");
goto finalize;
}
data_d = (int *)nvshmem_malloc(max_msg_size);
flag_d = (int *)nvshmem_malloc(sizeof(int));
flag_d_local = (int *)nvshmem_malloc(sizeof(int));
CUDA_CHECK(hipMemset(data_d, 0, max_msg_size));
CUDA_CHECK(hipMemset(flag_d, 0, sizeof(int)));
CUDA_CHECK(hipMemset(flag_d_local, 0, sizeof(int)));
int *hflag, *hflag_d;
CUDA_CHECK(hipHostMalloc((void **)&hflag, sizeof(int), 0));
*hflag = 0;
CUDA_CHECK(hipHostGetDevicePointer(&hflag_d, hflag, 0));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
nvshmem_barrier_all();
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipGetLastError());
if (mype == 0) {
printf("Note: This test measures full round-trip latency\n");
printf(" size(bytes) \t latency(us)\n");
fflush(stdout);
}
for (size = sizeof(int); size <= max_msg_size; size *= 2) {
int nelems, status = 0;
nelems = size / sizeof(int);
void *args[] = {&data_d, &flag_d, &flag_d_local, &nelems, &mype, &iter, &skip, &hflag_d};
CUDA_CHECK(hipMemset(flag_d, 0, sizeof(int)));
CUDA_CHECK(hipDeviceSynchronize());
nvshmem_barrier_all();
*hflag = 0;
status = nvshmemx_collective_launch((const void *)ping_pong, 1, 1, args, 0, stream);
if (status != NVSHMEMX_SUCCESS) {
fprintf(stderr, "shmemx_collective_launch failed %d \n", status);
exit(-1);
}
while (*((volatile int *)hflag) != 1)
;
nvshmem_barrier_all();
}
CUDA_CHECK(hipDeviceSynchronize());
finalize:
if (data_d) nvshmem_free(data_d);
if (flag_d) nvshmem_free(flag_d);
if (flag_d_local) nvshmem_free(flag_d_local);
finalize_wrapper();
return 0;
}
| 5edfb00f53f27166bcc8d7b0ec0ef48f1aa80987.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include "utils.h"
#define MAX_MSG_SIZE 1 * 1024 * 1024
#define UNROLL 8
__global__ void ping_pong(volatile int *data_d, volatile int *flag_d, volatile int *flag_d_local,
int len, int pe, int iter, int skip, int *hflag) {
long long int start, stop;
double usec, time;
int i, tid, peer;
peer = !pe;
tid = threadIdx.x;
for (i = 0; i < (iter + skip); i++) {
if (i == skip) start = clock64();
if (pe) {
nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1));
nvshmem_int_put_nbi((int *)data_d, (int *)data_d, len, peer);
nvshmem_fence();
nvshmemx_int_signal((int *)flag_d, i + 1, peer);
} else {
nvshmem_int_put_nbi((int *)data_d, (int *)data_d, len, peer);
nvshmem_fence();
nvshmemx_int_signal((int *)flag_d, i + 1, peer);
nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1));
}
}
stop = clock64();
nvshmem_quiet();
*hflag = 1;
if ((pe == 0) && !tid) {
time = (stop - start) / iter;
usec = time * 1000 / clockrate;
printf("%7lu \t %8.2f \n", len * sizeof(int), usec);
}
}
int main(int c, char *v[]) {
int mype, npes, size;
int *flag_d = NULL, *data_d = NULL, *flag_d_local = NULL;
cudaStream_t stream;
int iter = 500;
int skip = 50;
int max_msg_size = MAX_MSG_SIZE;
init_wrapper(&c, &v);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
if (npes != 2) {
fprintf(stderr, "This test requires exactly two processes \n");
goto finalize;
}
data_d = (int *)nvshmem_malloc(max_msg_size);
flag_d = (int *)nvshmem_malloc(sizeof(int));
flag_d_local = (int *)nvshmem_malloc(sizeof(int));
CUDA_CHECK(cudaMemset(data_d, 0, max_msg_size));
CUDA_CHECK(cudaMemset(flag_d, 0, sizeof(int)));
CUDA_CHECK(cudaMemset(flag_d_local, 0, sizeof(int)));
int *hflag, *hflag_d;
CUDA_CHECK(cudaHostAlloc((void **)&hflag, sizeof(int), 0));
*hflag = 0;
CUDA_CHECK(cudaHostGetDevicePointer(&hflag_d, hflag, 0));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
nvshmem_barrier_all();
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaGetLastError());
if (mype == 0) {
printf("Note: This test measures full round-trip latency\n");
printf(" size(bytes) \t latency(us)\n");
fflush(stdout);
}
for (size = sizeof(int); size <= max_msg_size; size *= 2) {
int nelems, status = 0;
nelems = size / sizeof(int);
void *args[] = {&data_d, &flag_d, &flag_d_local, &nelems, &mype, &iter, &skip, &hflag_d};
CUDA_CHECK(cudaMemset(flag_d, 0, sizeof(int)));
CUDA_CHECK(cudaDeviceSynchronize());
nvshmem_barrier_all();
*hflag = 0;
status = nvshmemx_collective_launch((const void *)ping_pong, 1, 1, args, 0, stream);
if (status != NVSHMEMX_SUCCESS) {
fprintf(stderr, "shmemx_collective_launch failed %d \n", status);
exit(-1);
}
while (*((volatile int *)hflag) != 1)
;
nvshmem_barrier_all();
}
CUDA_CHECK(cudaDeviceSynchronize());
finalize:
if (data_d) nvshmem_free(data_d);
if (flag_d) nvshmem_free(flag_d);
if (flag_d_local) nvshmem_free(flag_d_local);
finalize_wrapper();
return 0;
}
|
e19b5b56df58d7c7f7669d5ff86749d19ea2a258.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
namespace {
const char bessel_y1_name[] = "bessel_y1_forward";
void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
jitted_gpu_kernel<bessel_y1_name, scalar_t, scalar_t, 1>(iterator, bessel_y1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_cuda);
} // namespace native
} // namespace at
| e19b5b56df58d7c7f7669d5ff86749d19ea2a258.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
namespace {
const char bessel_y1_name[] = "bessel_y1_forward";
void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
jitted_gpu_kernel<bessel_y1_name, scalar_t, scalar_t, 1>(iterator, bessel_y1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_cuda);
} // namespace native
} // namespace at
|
ed5e109a81f6c95e2150bdb3a01c66cbc042536a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KNN_CUDA.cuh"
__constant__ float constant_query[100 * DATA_SIZE];
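// Distance kernel: each block loads blockDim.x consecutive dataset floats into shared memory and treats them as one DATA_SIZE-long point (blockDim.x is assumed to match DATA_SIZE here).
// Each thread t < query_size then accumulates the squared Euclidean distance against query vector t held in constant memory and writes it to results[blockIdx.x + t * dataset_size].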
__global__ void min_distance_coalesced_sh_query_constant(float* dataset, float* results, int dataset_size, int data_size, int query_size) {
__shared__ float sh_dataset[DATA_SIZE];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0;
float v = 0;
sh_dataset[threadIdx.x] = dataset[row];
__syncthreads();
if (threadIdx.x < query_size) {
for (int i = 0; i < DATA_SIZE; ++i) {
v = sh_dataset[i] - constant_query[i + threadIdx.x * DATA_SIZE];
d += v * v;
}
results[ blockIdx.x + threadIdx.x * dataset_size] = d;
__syncthreads();
}
}
void min_dinstance_coalesced_shDS_constQ_cuda(float* dataset, vector<float> query, thrust::device_vector<float> result, int* result_index, int dataset_size, int data_size, int query_size, int block_size) {
cout << "============= MIN DISTANCE COALESCED SH DATASET CONST QUERY ============" << endl << endl;
dim3 dim_grid_min_dist = dataset_size;
dim3 dim_block_min_dist = 128;
double startGPU;
float* constant_query_ptr;
int phase = ceil(query_size / 100);
int query_size_tmp = 0;
float* result_ptr;
int b;
startGPU = omp_get_wtime();
for (int i = 0; i < phase; i++) {
if (query_size_tmp + 100 <= query_size)
query_size_tmp += 100;
else {
query_size_tmp += query_size % 100;
b = query_size_tmp;
dim_grid_min_dist = ceil((float)dataset_size / b);
dim_block_min_dist = b;
}
result_ptr = thrust::raw_pointer_cast(&result[i * 100 * dataset_size]);
constant_query_ptr = &query[i * 100 * data_size];
hipMemcpyToSymbol(constant_query, constant_query_ptr, 100 * data_size * sizeof(float));
min_distance_coalesced_sh_query_constant << < dim_grid_min_dist, dim_block_min_dist >> > (dataset, result_ptr, dataset_size, data_size, query_size_tmp);
hipDeviceSynchronize();
}
result_ptr = thrust::raw_pointer_cast(&result[0]);
insertion_sort_cuda(result_ptr, result_index, dataset_size, query_size);
hipDeviceSynchronize();
printf_s("TIME: %.16g\n\n", omp_get_wtime() - startGPU);
} | ed5e109a81f6c95e2150bdb3a01c66cbc042536a.cu | #include "KNN_CUDA.cuh"
__constant__ float constant_query[100 * DATA_SIZE];
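// Distance kernel: each block loads blockDim.x consecutive dataset floats into shared memory and treats them as one DATA_SIZE-long point (blockDim.x is assumed to match DATA_SIZE here).
// Each thread t < query_size then accumulates the squared Euclidean distance against query vector t held in constant memory and writes it to results[blockIdx.x + t * dataset_size].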
__global__ void min_distance_coalesced_sh_query_constant(float* dataset, float* results, int dataset_size, int data_size, int query_size) {
__shared__ float sh_dataset[DATA_SIZE];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0;
float v = 0;
sh_dataset[threadIdx.x] = dataset[row];
__syncthreads();
if (threadIdx.x < query_size) {
for (int i = 0; i < DATA_SIZE; ++i) {
v = sh_dataset[i] - constant_query[i + threadIdx.x * DATA_SIZE];
d += v * v;
}
results[ blockIdx.x + threadIdx.x * dataset_size] = d;
__syncthreads();
}
}
void min_dinstance_coalesced_shDS_constQ_cuda(float* dataset, vector<float> query, thrust::device_vector<float> result, int* result_index, int dataset_size, int data_size, int query_size, int block_size) {
cout << "============= MIN DISTANCE COALESCED SH DATASET CONST QUERY ============" << endl << endl;
dim3 dim_grid_min_dist = dataset_size;
dim3 dim_block_min_dist = 128;
double startGPU;
float* constant_query_ptr;
int phase = ceil(query_size / 100);
int query_size_tmp = 0;
float* result_ptr;
int b;
startGPU = omp_get_wtime();
for (int i = 0; i < phase; i++) {
if (query_size_tmp + 100 <= query_size)
query_size_tmp += 100;
else {
query_size_tmp += query_size % 100;
b = query_size_tmp;
dim_grid_min_dist = ceil((float)dataset_size / b);
dim_block_min_dist = b;
}
result_ptr = thrust::raw_pointer_cast(&result[i * 100 * dataset_size]);
constant_query_ptr = &query[i * 100 * data_size];
cudaMemcpyToSymbol(constant_query, constant_query_ptr, 100 * data_size * sizeof(float));
min_distance_coalesced_sh_query_constant << < dim_grid_min_dist, dim_block_min_dist >> > (dataset, result_ptr, dataset_size, data_size, query_size_tmp);
cudaDeviceSynchronize();
}
result_ptr = thrust::raw_pointer_cast(&result[0]);
insertion_sort_cuda(result_ptr, result_index, dataset_size, query_size);
cudaDeviceSynchronize();
printf_s("TIME: %.16g\n\n", omp_get_wtime() - startGPU);
} |
dcdfe8a3b95c408b1ee0a7b5c6eb798b7c422ade.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
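// One Game of Life generation: each thread takes one row index i and sweeps columns j = 1..dim, counting the eight neighbours of cell (i,j) and applying the standard birth/survival rules to write newGrid.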
void __global__ kernel(int **grid,int **newGrid,int dim,int maxIter)
{
int j;
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if(i <=dim){
for (j = 1; j<=dim; j++) {
int numNeighbors =(((((((grid[i+1][j]+grid[i-1][j])+grid[i][j+1])+grid[i][j-1])+grid[i+1][j+1])+grid[i-1][j-1])+grid[i-1][j+1])+grid[i+1][j-1]);
if ((grid[i][j] == 1) && (numNeighbors < 2))
newGrid[i][j] = 0;
else if ((grid[i][j] == 1) && ((numNeighbors == 2) || (numNeighbors == 3)))
newGrid[i][j] = 1;
else if ((grid[i][j] == 1) && (numNeighbors > 3))
newGrid[i][j] = 0;
else if ((grid[i][j] == 0) && (numNeighbors == 3))
newGrid[i][j] = 1;
else
newGrid[i][j] = grid[i][j];
}
}
}
#include <stdio.h>
#include <stdlib.h>
#define SRAND_VALUE 1985
int getNeighbors(int **grid,int i,int j)
{
int numNeighbors;
//upper lower
numNeighbors = (((((((grid[i + 1][j] + grid[i - 1][j]) + grid[i][j + 1]) + grid[i][j - 1]) + grid[i + 1][j + 1]) + grid[i - 1][j - 1]) + grid[i - 1][j + 1]) + grid[i + 1][j - 1]);
//right left
//diagonals
return numNeighbors;
}
int main(int argc,char *argv[])
{
int i;
int j;
int iter;
int dim = 1024;
int maxIter = 1 << 10;
int **grid = (int **)(malloc((sizeof(int *) * (dim + 2))));
for (i = 0; i < (dim + 2); i++)
grid[i] = ((int *)(malloc((sizeof(int *) * (dim + 2)))));
int **newGrid = (int **)(malloc((sizeof(int *) * (dim + 2))));
for (i = 0; i < (dim + 2); i++)
newGrid[i] = ((int *)(malloc((sizeof(int *) * (dim + 2)))));
srand(1985);
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
grid[i][j] = (rand() % 2);
}
}
for (iter = 0; iter < maxIter; iter++) {
for (i = 1; i <= dim; i++) {
grid[i][0] = grid[i][dim];
grid[i][dim + 1] = grid[i][1];
}
for (j = 0; j <= (dim + 1); j++) {
grid[0][j] = grid[dim][j];
grid[dim + 1][j] = grid[1][j];
}
/***** Starting Parallelization *****/
//declare device variables
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int ** device_grid;
int ** device_newGrid;
//Allocate memory space in the GPU
hipMalloc((void **) &device_grid, sizeof(grid));
hipMalloc((void **) &device_newGrid, sizeof(newGrid));
//Copy from host to device
hipMemcpy(device_grid, grid, sizeof(grid), hipMemcpyHostToDevice);
hipMemcpy(device_newGrid, newGrid, sizeof(newGrid), hipMemcpyHostToDevice);
//launch kernel function
dim3 numThreads(32,32);
dim3 blocks((dim+ 31)/32, (dim+ 31)/32);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(numThreads), 0, 0, device_grid,device_newGrid, dim, maxIter);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("the elapsed time is %f\n", elapsedTime);
//copy back from device to host
hipFree(device_grid);
hipFree(device_newGrid);
/***** Ending Parallelization *****/
int **tmpGrid = grid;
grid = newGrid;
newGrid = tmpGrid;
}
int total = 0;
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
total += grid[i][j];
}
}
printf("Total Alive: %d\n",total);
free(grid);
free(newGrid);
return 0;
}
| dcdfe8a3b95c408b1ee0a7b5c6eb798b7c422ade.cu |
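// One Game of Life generation: each thread takes one row index i and sweeps columns j = 1..dim, counting the eight neighbours of cell (i,j) and applying the standard birth/survival rules to write newGrid.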
void __global__ kernel(int **grid,int **newGrid,int dim,int maxIter)
{
int j;
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if(i <=dim){
for (j = 1; j<=dim; j++) {
int numNeighbors =(((((((grid[i+1][j]+grid[i-1][j])+grid[i][j+1])+grid[i][j-1])+grid[i+1][j+1])+grid[i-1][j-1])+grid[i-1][j+1])+grid[i+1][j-1]);
if ((grid[i][j] == 1) && (numNeighbors < 2))
newGrid[i][j] = 0;
else if ((grid[i][j] == 1) && ((numNeighbors == 2) || (numNeighbors == 3)))
newGrid[i][j] = 1;
else if ((grid[i][j] == 1) && (numNeighbors > 3))
newGrid[i][j] = 0;
else if ((grid[i][j] == 0) && (numNeighbors == 3))
newGrid[i][j] = 1;
else
newGrid[i][j] = grid[i][j];
}
}
}
#include <stdio.h>
#include <stdlib.h>
#define SRAND_VALUE 1985
int getNeighbors(int **grid,int i,int j)
{
int numNeighbors;
//upper lower
numNeighbors = (((((((grid[i + 1][j] + grid[i - 1][j]) + grid[i][j + 1]) + grid[i][j - 1]) + grid[i + 1][j + 1]) + grid[i - 1][j - 1]) + grid[i - 1][j + 1]) + grid[i + 1][j - 1]);
//right left
//diagonals
return numNeighbors;
}
int main(int argc,char *argv[])
{
int i;
int j;
int iter;
int dim = 1024;
int maxIter = 1 << 10;
int **grid = (int **)(malloc((sizeof(int *) * (dim + 2))));
for (i = 0; i < (dim + 2); i++)
grid[i] = ((int *)(malloc((sizeof(int *) * (dim + 2)))));
int **newGrid = (int **)(malloc((sizeof(int *) * (dim + 2))));
for (i = 0; i < (dim + 2); i++)
newGrid[i] = ((int *)(malloc((sizeof(int *) * (dim + 2)))));
srand(1985);
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
grid[i][j] = (rand() % 2);
}
}
for (iter = 0; iter < maxIter; iter++) {
for (i = 1; i <= dim; i++) {
grid[i][0] = grid[i][dim];
grid[i][dim + 1] = grid[i][1];
}
for (j = 0; j <= (dim + 1); j++) {
grid[0][j] = grid[dim][j];
grid[dim + 1][j] = grid[1][j];
}
/***** Starting Parallelization *****/
//declare device variables
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int ** device_grid;
int ** device_newGrid;
//Allocate memory space in the GPU
cudaMalloc((void **) &device_grid, sizeof(grid));
cudaMalloc((void **) &device_newGrid, sizeof(newGrid));
//Copy from host to device
cudaMemcpy(device_grid, grid, sizeof(grid), cudaMemcpyHostToDevice);
cudaMemcpy(device_newGrid, newGrid, sizeof(newGrid), cudaMemcpyHostToDevice);
//launch kernel function
dim3 numThreads(32,32);
dim3 blocks((dim+ 31)/32, (dim+ 31)/32);
cudaEventRecord(start, 0);
kernel<<<blocks,numThreads>>>(device_grid,device_newGrid, dim, maxIter);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("the elapsed time is %f\n", elapsedTime);
//copy back from device to host
cudaFree(device_grid);
cudaFree(device_newGrid);
/***** Ending Parallelization *****/
int **tmpGrid = grid;
grid = newGrid;
newGrid = tmpGrid;
}
int total = 0;
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
total += grid[i][j];
}
}
printf("Total Alive: %d\n",total);
free(grid);
free(newGrid);
return 0;
}
|
1d9f52e2536feb2e7d41ce6d8e8c4954b4d96ee4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void nlpm_shear0(float* nu, float* Phi2ZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
unsigned int idx_zonal;
if(zonal_kx1_only) idx_zonal = ikx1;
else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = kx[idx]*flr(bidx)*sqrt(Phi2ZF[idx_zonal]);
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = kx[idx]*flr(bidx)*sqrt(Phi2ZF[idx_zonal]);
}
}
}
}
//for when dorland_phase is complex
__global__ void nlpm_shear0(hipComplex* nu, hipComplex* PhiZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
unsigned int idx_zonal;
if(zonal_kx1_only) {
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
idx_zonal = ikx1;
}
else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = kx[idx]*flr(bidx)*PhiZF[idx_zonal];
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = kx[idx]*flr(bidx)*PhiZF[idx_zonal];
}
}
}
}
//for when dorland_phase is complex
__global__ void nlpm_shear0_ifac(hipComplex* nu, hipComplex* PhiZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
unsigned int idx_zonal;
if(zonal_kx1_only) {
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
idx_zonal = ikx1;
}
else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
//need a factor of i
nu[idxz].x = -kx[idx]*flr(bidx)*PhiZF[idx_zonal].y;
nu[idxz].y = kx[idx]*flr(bidx)*PhiZF[idx_zonal].x;
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz].x = -kx[idx]*flr(bidx)*PhiZF[idx_zonal].y;
nu[idxz].y = kx[idx]*flr(bidx)*PhiZF[idx_zonal].x;
}
}
}
}
__global__ void nlpm_shear1(float* nu, float* Phi2ZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
//int ikx1 = round(X0_d); //determine the index of the kx=1 mode
//if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
//unsigned int idx_zonal;
//if(zonal_kx1_only) idx_zonal = ikx1;
//else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx && idx!=0) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = abs(kx[idx])*abs(flr(bidx))*sqrt(Phi2ZF[idx]);
//nu[idxz] = flr(bidx);
//nu[idxz] = abs(kx[idx])*sqrt(Phi2ZF[idx]);//*abs(flr(bidx))*sqrt(Phi2ZF[idx_zonal]);
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = abs(kx[idx])*abs(flr(bidx))*sqrt(Phi2ZF[idx]);
}
}
}
}
__global__ void nlpm_shear2(float* nu, float* Phi2ZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
//int ikx1 = round(X0_d); //determine the index of the kx=1 mode
//if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
//unsigned int idx_zonal;
//if(zonal_kx1_only) idx_zonal = ikx1;
//else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = kx[idx]*kx[idx]*flr(bidx)*flr(bidx)*Phi2ZF[idx];
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = pow(kx[idx],2)*pow(flr(bidx),2)*Phi2ZF[idx];
}
}
}
}
__global__ void get_ky0kx1_rms(float* Phi_zf_kx1, float* Phi2)
{
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
int iky0 = 0;
*Phi_zf_kx1 = sqrt(Phi2[iky0 + (ny/2+1)*ikx1]);
}
__global__ void get_kx1_rms(float* Phi_zf_kx1, float* Phi2_zonal)
{
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
*Phi_zf_kx1 = sqrt(Phi2_zonal[ikx1]);
}
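// Clamped linear ramp: D = (Phi_zf_kx1 - low_cutoff) / (high_cutoff - low_cutoff), limited to the range [0, dnlpm_max].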
__global__ void get_Dnlpm(float* Dnlpm, float Phi_zf_kx1, float low_cutoff, float high_cutoff, float nu, float dnlpm_max)
{
//float low_cutoff= .04; //smallest value of phi_zf that D_nlpm is an effect
//float high_cutoff = .08; //past this value D=1
float d = (Phi_zf_kx1 - low_cutoff)/(high_cutoff-low_cutoff);
if(d<0) d=0.; // 0 < D_nlpm < 1
if(d>dnlpm_max) d=dnlpm_max;
*Dnlpm = d;
}
__global__ void get_Dnlpm_quadratic(float* Dnlpm, float Phi_zf_kx1)
{
*Dnlpm = Phi_zf_kx1;
}
__global__ void nlpm(hipComplex* res, hipComplex* field, float* ky, float* nu_nlpm, float dnlpm)
{
unsigned int idy = get_idy();
unsigned int idx = get_idx();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
res[index] = field[index]*abs(ky[idy])*dnlpm*nu_nlpm[idz];
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
res[index] = field[index]*abs(ky[idy])*dnlpm*nu_nlpm[IDZ];
}
}
}
}
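// Implicit (backward-Euler style) damping: each Fourier mode is divided by (1 + dt_loc*kxfac*dnlpm*nu_nlpm[idz]*ky[idy]).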
__global__ void nlpm_filter(hipComplex* field, float* nu_nlpm, float* ky, float dt_loc, float dnlpm, float kxfac)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
//if(nu_nlpm[idz] < 1e-6) nu_nlpm[idz] = 0.;
double tmp = (double) 1. + ((double) dt_loc*kxfac*(dnlpm)*nu_nlpm[idz]*ky[idy]);
field[index].x = field[index].x/( tmp );
field[index].y = field[index].y/( tmp );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( 1. + dt_loc*kxfac*(dnlpm)*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
__global__ void nlpm_filter_tmp(hipComplex* field, float* nu_nlpm, float* ky, float dt_loc, float dnlpm, float kxfac, float* tmp)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
//if(nu_nlpm[idz] < 1e-6) nu_nlpm[idz] = 0.;
tmp[index] = (double) 1. + ((double) dt_loc*kxfac*(dnlpm)*nu_nlpm[idz]*ky[idy]);
field[index].x = field[index].x/( tmp[index] );
field[index].y = field[index].y/( tmp[index] );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( 1. + dt_loc*kxfac*(dnlpm)*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
//for when dorland_phase is complex
__global__ void nlpm_filter(hipComplex* field, hipComplex* nu_nlpm, float* ky, float dt_loc, float dnlpm, float kxfac)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm[idz]*ky[idy] );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
__global__ void nlpm_filter(hipComplex* field, float* nu_nlpm, float* ky, float dt_loc, float* Dnlpm, float kxfac)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
field[index] = field[index]/( 1. + dt_loc*(*Dnlpm)*kxfac*nu_nlpm[idz]*ky[idy] );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( 1. + dt_loc*(*Dnlpm)*kxfac*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
__global__ void nlpm_filter_kxdep(hipComplex* field, float* ky, float* kx, float dt_loc, float dnlpm, float kxfac, float c_abs, float* PhiZF_abs_X, float c_complex, hipComplex* PhiZF_complex_X,
float rho, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
double bidx = b(rho, kx[idx], ky[0], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]); //only want ky=0 component of kperp**2
hipComplex nu_nlpm;
nu_nlpm.x = c_abs*abs(ky[idy])*abs(kx[idx])*abs(flr(bidx))*PhiZF_abs_X[idx] + c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].x;
nu_nlpm.y = c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].y;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
double bidx = b(rho, kx[idx], ky[0], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
hipComplex nu_nlpm;
nu_nlpm.x = c_abs*abs(ky[idy])*abs(kx[idx])*abs(flr(bidx))*PhiZF_abs_X[idx] + c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].x;
nu_nlpm.y = c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].y;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm );
}
}
}
}
| 1d9f52e2536feb2e7d41ce6d8e8c4954b4d96ee4.cu | __global__ void nlpm_shear0(float* nu, float* Phi2ZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
unsigned int idx_zonal;
if(zonal_kx1_only) idx_zonal = ikx1;
else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = kx[idx]*flr(bidx)*sqrt(Phi2ZF[idx_zonal]);
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = kx[idx]*flr(bidx)*sqrt(Phi2ZF[idx_zonal]);
}
}
}
}
//for when dorland_phase is complex
__global__ void nlpm_shear0(cuComplex* nu, cuComplex* PhiZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
unsigned int idx_zonal;
if(zonal_kx1_only) {
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
idx_zonal = ikx1;
}
else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = kx[idx]*flr(bidx)*PhiZF[idx_zonal];
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = kx[idx]*flr(bidx)*PhiZF[idx_zonal];
}
}
}
}
//for when dorland_phase is complex
__global__ void nlpm_shear0_ifac(cuComplex* nu, cuComplex* PhiZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
unsigned int idx_zonal;
if(zonal_kx1_only) {
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
idx_zonal = ikx1;
}
else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
//need a factor of i
nu[idxz].x = -kx[idx]*flr(bidx)*PhiZF[idx_zonal].y;
nu[idxz].y = kx[idx]*flr(bidx)*PhiZF[idx_zonal].x;
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz].x = -kx[idx]*flr(bidx)*PhiZF[idx_zonal].y;
nu[idxz].y = kx[idx]*flr(bidx)*PhiZF[idx_zonal].x;
}
}
}
}
__global__ void nlpm_shear1(float* nu, float* Phi2ZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
//int ikx1 = round(X0_d); //determine the index of the kx=1 mode
//if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
//unsigned int idx_zonal;
//if(zonal_kx1_only) idx_zonal = ikx1;
//else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx && idx!=0) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = abs(kx[idx])*abs(flr(bidx))*sqrt(Phi2ZF[idx]);
//nu[idxz] = flr(bidx);
//nu[idxz] = abs(kx[idx])*sqrt(Phi2ZF[idx]);//*abs(flr(bidx))*sqrt(Phi2ZF[idx_zonal]);
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = abs(kx[idx])*abs(flr(bidx))*sqrt(Phi2ZF[idx]);
}
}
}
}
__global__ void nlpm_shear2(float* nu, float* Phi2ZF, float dnlpm, float* kx,
float rho, float* ky, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv, bool zonal_kx1_only)
{
unsigned int idz = get_idz();
unsigned int idy = 0;
unsigned int idx = get_idx();
//int ikx1 = round(X0_d); //determine the index of the kx=1 mode
//if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
//unsigned int idx_zonal;
//if(zonal_kx1_only) idx_zonal = ikx1;
//else idx_zonal = idx;
if(nz<=zthreads) {
if(idz<nz && idx<nx) {
unsigned int idxz = idx + nx*idz;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]);
nu[idxz] = kx[idx]*kx[idx]*flr(bidx)*flr(bidx)*Phi2ZF[idx];
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int idxz = idx + nx*IDZ;
double bidx = b(rho, kx[idx], ky[idy], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
nu[idxz] = pow(kx[idx],2)*pow(flr(bidx),2)*Phi2ZF[idx];
}
}
}
}
__global__ void get_ky0kx1_rms(float* Phi_zf_kx1, float* Phi2)
{
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
int iky0 = 0;
*Phi_zf_kx1 = sqrt(Phi2[iky0 + (ny/2+1)*ikx1]);
}
__global__ void get_kx1_rms(float* Phi_zf_kx1, float* Phi2_zonal)
{
int ikx1 = round(X0_d); //determine the index of the kx=1 mode
if(ikx1 > (nx-1)/3) ikx1=(nx-1)/3; //if kx=1 is not in the box, use the highest kx
*Phi_zf_kx1 = sqrt(Phi2_zonal[ikx1]);
}
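// Clamped linear ramp: D = (Phi_zf_kx1 - low_cutoff) / (high_cutoff - low_cutoff), limited to the range [0, dnlpm_max].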
__global__ void get_Dnlpm(float* Dnlpm, float Phi_zf_kx1, float low_cutoff, float high_cutoff, float nu, float dnlpm_max)
{
//float low_cutoff= .04; //smallest value of phi_zf that D_nlpm is an effect
//float high_cutoff = .08; //past this value D=1
float d = (Phi_zf_kx1 - low_cutoff)/(high_cutoff-low_cutoff);
if(d<0) d=0.; // 0 < D_nlpm < 1
if(d>dnlpm_max) d=dnlpm_max;
*Dnlpm = d;
}
__global__ void get_Dnlpm_quadratic(float* Dnlpm, float Phi_zf_kx1)
{
*Dnlpm = Phi_zf_kx1;
}
__global__ void nlpm(cuComplex* res, cuComplex* field, float* ky, float* nu_nlpm, float dnlpm)
{
unsigned int idy = get_idy();
unsigned int idx = get_idx();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
res[index] = field[index]*abs(ky[idy])*dnlpm*nu_nlpm[idz];
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
res[index] = field[index]*abs(ky[idy])*dnlpm*nu_nlpm[IDZ];
}
}
}
}
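// Implicit (backward-Euler style) damping: each Fourier mode is divided by (1 + dt_loc*kxfac*dnlpm*nu_nlpm[idz]*ky[idy]).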
__global__ void nlpm_filter(cuComplex* field, float* nu_nlpm, float* ky, float dt_loc, float dnlpm, float kxfac)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
//if(nu_nlpm[idz] < 1e-6) nu_nlpm[idz] = 0.;
double tmp = (double) 1. + ((double) dt_loc*kxfac*(dnlpm)*nu_nlpm[idz]*ky[idy]);
field[index].x = field[index].x/( tmp );
field[index].y = field[index].y/( tmp );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( 1. + dt_loc*kxfac*(dnlpm)*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
__global__ void nlpm_filter_tmp(cuComplex* field, float* nu_nlpm, float* ky, float dt_loc, float dnlpm, float kxfac, float* tmp)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
//if(nu_nlpm[idz] < 1e-6) nu_nlpm[idz] = 0.;
tmp[index] = (double) 1. + ((double) dt_loc*kxfac*(dnlpm)*nu_nlpm[idz]*ky[idy]);
field[index].x = field[index].x/( tmp[index] );
field[index].y = field[index].y/( tmp[index] );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( 1. + dt_loc*kxfac*(dnlpm)*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
//for when dorland_phase is complex
__global__ void nlpm_filter(cuComplex* field, cuComplex* nu_nlpm, float* ky, float dt_loc, float dnlpm, float kxfac)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm[idz]*ky[idy] );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
__global__ void nlpm_filter(cuComplex* field, float* nu_nlpm, float* ky, float dt_loc, float* Dnlpm, float kxfac)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
field[index] = field[index]/( 1. + dt_loc*(*Dnlpm)*kxfac*nu_nlpm[idz]*ky[idy] );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
field[index] = field[index]/( 1. + dt_loc*(*Dnlpm)*kxfac*nu_nlpm[IDZ]*ky[idy] );
}
}
}
}
__global__ void nlpm_filter_kxdep(cuComplex* field, float* ky, float* kx, float dt_loc, float dnlpm, float kxfac, float c_abs, float* PhiZF_abs_X, float c_complex, cuComplex* PhiZF_complex_X,
float rho, float shat, float* gds2, float* gds21, float*gds22, float* bmagInv)
{
unsigned int idx = get_idx();
unsigned int idy = get_idy();
unsigned int idz = get_idz();
if(nz<=zthreads) {
if(idy<(ny/2+1) && idx<nx && idz<nz) {
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*idz;
double bidx = b(rho, kx[idx], ky[0], shat, gds2[idz], gds21[idz], gds22[idz], bmagInv[idz]); //only want ky=0 component of kperp**2
cuComplex nu_nlpm;
nu_nlpm.x = c_abs*abs(ky[idy])*abs(kx[idx])*abs(flr(bidx))*PhiZF_abs_X[idx] + c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].x;
nu_nlpm.y = c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].y;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm );
}
}
else {
for(int i=0; i<nz/zthreads; i++) {
if(idy<(ny/2+1) && idx<nx && idz<zthreads) {
unsigned int IDZ = idz + zthreads*i;
unsigned int index = idy + (ny/2+1)*idx + nx*(ny/2+1)*IDZ;
double bidx = b(rho, kx[idx], ky[0], shat, gds2[IDZ], gds21[IDZ], gds22[IDZ], bmagInv[IDZ]);
cuComplex nu_nlpm;
nu_nlpm.x = c_abs*abs(ky[idy])*abs(kx[idx])*abs(flr(bidx))*PhiZF_abs_X[idx] + c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].x;
nu_nlpm.y = c_complex*ky[idy]*kx[idx]*flr(bidx)*PhiZF_complex_X[idx].y;
field[index] = field[index]/( make_cuComplex(1.,0.) + dt_loc*kxfac*(dnlpm)*nu_nlpm );
}
}
}
}
|
9789c8657bc56337b3255ff81a801c98673d1343.hip | // !!! This is a file automatically generated by hipify!!!
#include "include/common/cuda_error_hadling.h"
#include "include/common_data_structures/containers.h"
#include <hip/hip_runtime_api.h>
#include <cfloat>
#include <iostream>
#include <fstream>
#include <set>
using namespace std;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
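// One edge-parallel BFS sweep: every edge whose source sits on the current level and whose destination is still unvisited (level -1) assigns the next level to the destination, marks itself as a tree edge and clears the termination flag.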
void __global__ bfs_kernel(int *_src_ids, int *_dst_ids, bool *_in_trees, long long _edges_count, int *_bfs_level,
int _current_level, bool *_terminate)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if((_bfs_level[src_id] == _current_level) && (_bfs_level[dst_id] == -1))
{
_bfs_level[dst_id] = _current_level + 1;
_in_trees[idx] = true;
_terminate[0] = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_D_kernel(int *_D, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_D[idx] = 1;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ compute_D_kernel(int *_src_ids, int *_dst_ids, int _edges_count, int *_N, int *_D, int *_bfs_level, int _current_level)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if ((_N[dst_id] > _N[src_id]) && (_bfs_level[src_id] == _current_level))
{
atomicAdd(&_D[src_id], _D[dst_id]);
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_L_H_kernel(int *_L, int *_H, int *_N, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_L[idx] = _N[idx];
_H[idx] = _N[idx];
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ optimize_L_H_using_tree_kernel(int *_src_ids, int *_dst_ids, int _edges_count, int *_L, int *_H, int *_N,
int *_bfs_level, int _level)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_bfs_level[src_id] == _level)
{
if (_N[dst_id] > _N[src_id])
{
atomicMin(&_L[src_id], _L[dst_id]);
atomicMax(&_H[src_id], _H[dst_id]);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ optimize_L_H_using_graph_kernel(int *_src_ids, int *_dst_ids, bool *_in_trees, int _edges_count,
int *_L, int *_H, int *_N, int *_bfs_level, int _level)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_bfs_level[src_id] == _level)
{
if (!_in_trees[idx])
{
atomicMin(&_L[src_id], _N[dst_id]);
atomicMax(&_H[src_id], _N[dst_id]);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
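// Bridge test on tree edges (N[dst] > N[src]): the edge is a bridge when the destination's subtree reaches nothing outside itself, i.e. L[dst] == N[dst] and H[dst] < N[dst] + D[dst].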
void __global__ process_results_kernel(int *_src_ids, int *_dst_ids, bool *_bridges, int _edges_count,
int *_L, int *_H, int *_D, int *_N)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_N[dst_id] > _N[src_id])
{
if ((_L[dst_id] == _N[dst_id]) && (_H[dst_id] < (_N[dst_id] + _D[dst_id])))
{
_bridges[idx] = true;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// wrappers
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void tarjan_bfs_wrapper(int *_device_src_ids, int *_device_dst_ids, bool *_device_in_trees, long long _edges_count,
int *_device_bfs_level, int _vertices_count, int &_max_level, int _root)
{
dim3 threads(1024, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
SAFE_CALL(hipMemset(_device_in_trees, 0, sizeof(bool) * _edges_count));
int current_level = 1;
    SAFE_CALL(hipMemcpy(&_device_bfs_level[_root], &current_level, sizeof(int), hipMemcpyHostToDevice));
// do parallel bfs
bool host_terminate = false;
bool *device_terminate;
SAFE_CALL(hipMalloc((void**)&device_terminate, sizeof(bool)));
do
{
host_terminate = true;
SAFE_CALL(hipMemcpy(device_terminate, &host_terminate, sizeof(bool), hipMemcpyHostToDevice));
        SAFE_KERNEL_CALL((hipLaunchKernelGGL(bfs_kernel, dim3(grid_edges), dim3(threads), 0, 0, _device_src_ids, _device_dst_ids,
                                             _device_in_trees, _edges_count, _device_bfs_level,
                                             current_level, device_terminate)));
SAFE_CALL(hipMemcpy(&host_terminate, device_terminate, sizeof(bool), hipMemcpyDeviceToHost));
current_level++;
} while (host_terminate == false);
SAFE_CALL(hipFree(device_terminate));
if(current_level > _max_level)
_max_level = current_level;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void compute_D_wrapper(int *_device_trees_src_ids, int *_device_trees_dst_ids, int _trees_edges_count,
int *_device_D, int *_device_N, int *_device_bfs_level, int _vertices_count, int _max_level)
{
cout << "D wrapper " << _trees_edges_count << endl;
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
dim3 grid_edges((_trees_edges_count - 1) / threads.x + 1, 1, 1);
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( init_D_kernel) , dim3(grid_vertices), dim3(threads) , 0, 0, _device_D, _vertices_count) ));
for (int level = _max_level; level >= 0; level--)
{
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( compute_D_kernel) , dim3(grid_edges), dim3(threads) , 0, 0, _device_trees_src_ids, _device_trees_dst_ids,
_trees_edges_count, _device_N, _device_D, _device_bfs_level, level) ));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void compute_L_H_wrapper(int *_device_src_ids, int *_device_dst_ids, bool *_device_in_trees, int _edges_count,
int *_device_trees_src_ids, int *_device_trees_dst_ids, int _trees_edges_count,
int *_device_L, int *_device_H, int *_device_N, int *_device_bfs_level, int _vertices_count, int _max_level)
{
cout << "L H wrapper " << endl;
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
dim3 grid_trees_edges((_trees_edges_count - 1) / threads.x + 1, 1, 1);
// init using numbers
    SAFE_KERNEL_CALL((hipLaunchKernelGGL(init_L_H_kernel, dim3(grid_vertices), dim3(threads), 0, 0, _device_L, _device_H, _device_N, _vertices_count)));
// optimize
for(int level = _max_level; level >= 1; level--)
{
        SAFE_KERNEL_CALL((hipLaunchKernelGGL(optimize_L_H_using_tree_kernel, dim3(grid_trees_edges), dim3(threads), 0, 0,
                                             _device_trees_src_ids, _device_trees_dst_ids, _trees_edges_count, _device_L, _device_H, _device_N,
                                             _device_bfs_level, level)));
        SAFE_KERNEL_CALL((hipLaunchKernelGGL(optimize_L_H_using_graph_kernel, dim3(grid_edges), dim3(threads), 0, 0,
                                             _device_src_ids, _device_dst_ids, _device_in_trees, _edges_count, _device_L, _device_H, _device_N,
                                             _device_bfs_level, level)));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void process_results_wrapper(int *_device_src_ids, int *_device_dst_ids, bool *_device_bridges, int _edges_count,
int *_device_L, int *_device_H, int *_device_D, int *_device_N)
{
dim3 threads(1024, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
    SAFE_KERNEL_CALL((hipLaunchKernelGGL(process_results_kernel, dim3(grid_edges), dim3(threads), 0, 0, _device_src_ids, _device_dst_ids, _device_bridges, _edges_count,
                                         _device_L, _device_H, _device_D, _device_N)));
}
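// Hypothetical end-to-end driver (a sketch of the assumed call order; buffer names are illustrative and all
// pointers are device allocations prepared by the caller):
//     int max_level = 0;
//     tarjan_bfs_wrapper(src_ids, dst_ids, in_trees, edges_count, bfs_level, vertices_count, max_level, root);
//     compute_D_wrapper(tree_src_ids, tree_dst_ids, tree_edges_count, D, N, bfs_level, vertices_count, max_level);
//     compute_L_H_wrapper(src_ids, dst_ids, in_trees, edges_count, tree_src_ids, tree_dst_ids, tree_edges_count,
//                         L, H, N, bfs_level, vertices_count, max_level);
//     process_results_wrapper(src_ids, dst_ids, bridges, edges_count, L, H, D, N);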
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
| 9789c8657bc56337b3255ff81a801c98673d1343.cu | #include "include/common/cuda_error_hadling.h"
#include "include/common_data_structures/containers.h"
#include <cuda_runtime_api.h>
#include <cfloat>
#include <iostream>
#include <fstream>
#include <set>
using namespace std;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ bfs_kernel(int *_src_ids, int *_dst_ids, bool *_in_trees, long long _edges_count, int *_bfs_level,
int _current_level, bool *_terminate)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if((_bfs_level[src_id] == _current_level) && (_bfs_level[dst_id] == -1))
{
_bfs_level[dst_id] = _current_level + 1;
_in_trees[idx] = true;
_terminate[0] = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_D_kernel(int *_D, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_D[idx] = 1;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ compute_D_kernel(int *_src_ids, int *_dst_ids, int _edges_count, int *_N, int *_D, int *_bfs_level, int _current_level)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if ((_N[dst_id] > _N[src_id]) && (_bfs_level[src_id] == _current_level))
{
atomicAdd(&_D[src_id], _D[dst_id]);
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_L_H_kernel(int *_L, int *_H, int *_N, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_L[idx] = _N[idx];
_H[idx] = _N[idx];
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ optimize_L_H_using_tree_kernel(int *_src_ids, int *_dst_ids, int _edges_count, int *_L, int *_H, int *_N,
int *_bfs_level, int _level)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_bfs_level[src_id] == _level)
{
if (_N[dst_id] > _N[src_id])
{
atomicMin(&_L[src_id], _L[dst_id]);
atomicMax(&_H[src_id], _H[dst_id]);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ optimize_L_H_using_graph_kernel(int *_src_ids, int *_dst_ids, bool *_in_trees, int _edges_count,
int *_L, int *_H, int *_N, int *_bfs_level, int _level)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_bfs_level[src_id] == _level)
{
if (!_in_trees[idx])
{
atomicMin(&_L[src_id], _N[dst_id]);
atomicMax(&_H[src_id], _N[dst_id]);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ process_results_kernel(int *_src_ids, int *_dst_ids, bool *_bridges, int _edges_count,
int *_L, int *_H, int *_D, int *_N)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_N[dst_id] > _N[src_id])
{
if ((_L[dst_id] == _N[dst_id]) && (_H[dst_id] < (_N[dst_id] + _D[dst_id])))
{
_bridges[idx] = true;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// wrappers
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void tarjan_bfs_wrapper(int *_device_src_ids, int *_device_dst_ids, bool *_device_in_trees, long long _edges_count,
int *_device_bfs_level, int _vertices_count, int &_max_level, int _root)
{
dim3 threads(1024, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
SAFE_CALL(cudaMemset(_device_in_trees, 0, sizeof(bool) * _edges_count));
int current_level = 1;
    SAFE_CALL(cudaMemcpy(&_device_bfs_level[_root], &current_level, sizeof(int), cudaMemcpyHostToDevice));
// do parallel bfs
bool host_terminate = false;
bool *device_terminate;
SAFE_CALL(cudaMalloc((void**)&device_terminate, sizeof(bool)));
do
{
host_terminate = true;
SAFE_CALL(cudaMemcpy(device_terminate, &host_terminate, sizeof(bool), cudaMemcpyHostToDevice));
SAFE_KERNEL_CALL((bfs_kernel <<< grid_edges, threads >>> (_device_src_ids, _device_dst_ids,
_device_in_trees, _edges_count, _device_bfs_level,
current_level, device_terminate)));
SAFE_CALL(cudaMemcpy(&host_terminate, device_terminate, sizeof(bool), cudaMemcpyDeviceToHost));
current_level++;
} while (host_terminate == false);
SAFE_CALL(cudaFree(device_terminate));
if(current_level > _max_level)
_max_level = current_level;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void compute_D_wrapper(int *_device_trees_src_ids, int *_device_trees_dst_ids, int _trees_edges_count,
int *_device_D, int *_device_N, int *_device_bfs_level, int _vertices_count, int _max_level)
{
cout << "D wrapper " << _trees_edges_count << endl;
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
dim3 grid_edges((_trees_edges_count - 1) / threads.x + 1, 1, 1);
SAFE_KERNEL_CALL(( init_D_kernel <<< grid_vertices, threads >>> (_device_D, _vertices_count) ));
for (int level = _max_level; level >= 0; level--)
{
SAFE_KERNEL_CALL(( compute_D_kernel <<< grid_edges, threads >>> (_device_trees_src_ids, _device_trees_dst_ids,
_trees_edges_count, _device_N, _device_D, _device_bfs_level, level) ));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void compute_L_H_wrapper(int *_device_src_ids, int *_device_dst_ids, bool *_device_in_trees, int _edges_count,
int *_device_trees_src_ids, int *_device_trees_dst_ids, int _trees_edges_count,
int *_device_L, int *_device_H, int *_device_N, int *_device_bfs_level, int _vertices_count, int _max_level)
{
cout << "L H wrapper " << endl;
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
dim3 grid_trees_edges((_trees_edges_count - 1) / threads.x + 1, 1, 1);
// init using numbers
SAFE_KERNEL_CALL((init_L_H_kernel <<< grid_vertices, threads >>> (_device_L, _device_H, _device_N, _vertices_count)));
// optimize
for(int level = _max_level; level >= 1; level--)
{
SAFE_KERNEL_CALL((optimize_L_H_using_tree_kernel <<< grid_trees_edges, threads >>>
(_device_trees_src_ids, _device_trees_dst_ids, _trees_edges_count, _device_L, _device_H, _device_N,
_device_bfs_level, level)));
SAFE_KERNEL_CALL((optimize_L_H_using_graph_kernel <<< grid_edges, threads >>>
(_device_src_ids, _device_dst_ids, _device_in_trees, _edges_count, _device_L, _device_H, _device_N,
_device_bfs_level, level)));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void process_results_wrapper(int *_device_src_ids, int *_device_dst_ids, bool *_device_bridges, int _edges_count,
int *_device_L, int *_device_H, int *_device_D, int *_device_N)
{
dim3 threads(1024, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
SAFE_KERNEL_CALL((process_results_kernel <<< grid_edges, threads >>> (_device_src_ids, _device_dst_ids, _device_bridges, _edges_count,
_device_L, _device_H, _device_D, _device_N)));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
2ce3a65dbef3708a06959edd9849995d63b00946.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calculateGaussianKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gaussKernel = NULL;
hipMalloc(&gaussKernel, XSIZE*YSIZE);
const float sigma = 1;
int halfKernelWidth = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL(calculateGaussianKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, gaussKernel, sigma, halfKernelWidth);
hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(calculateGaussianKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, gaussKernel, sigma, halfKernelWidth);
    }
auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(calculateGaussianKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, gaussKernel, sigma, halfKernelWidth);
    }
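    // Note: kernel launches are asynchronous and no hipDeviceSynchronize() precedes the end timestamp below,
    // so the measured interval mainly reflects launch/enqueue overhead rather than kernel execution time.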
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2ce3a65dbef3708a06959edd9849995d63b00946.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calculateGaussianKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gaussKernel = NULL;
cudaMalloc(&gaussKernel, XSIZE*YSIZE);
const float sigma = 1;
int halfKernelWidth = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calculateGaussianKernel<<<gridBlock,threadBlock>>>(gaussKernel,sigma,halfKernelWidth);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calculateGaussianKernel<<<gridBlock,threadBlock>>>(gaussKernel,sigma,halfKernelWidth);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calculateGaussianKernel<<<gridBlock,threadBlock>>>(gaussKernel,sigma,halfKernelWidth);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
05c42899649abf1604cb6aefda80852371385abd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ComputeLaplacianInPlace.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d = NULL;
hipMalloc(&d, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL(ComputeLaplacianInPlace, dim3(gridBlock), dim3(threadBlock), 0, 0, d, n);
hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(ComputeLaplacianInPlace, dim3(gridBlock), dim3(threadBlock), 0, 0, d, n);
    }
auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(ComputeLaplacianInPlace, dim3(gridBlock), dim3(threadBlock), 0, 0, d, n);
    }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 05c42899649abf1604cb6aefda80852371385abd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ComputeLaplacianInPlace.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d = NULL;
cudaMalloc(&d, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ComputeLaplacianInPlace<<<gridBlock,threadBlock>>>(d,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ComputeLaplacianInPlace<<<gridBlock,threadBlock>>>(d,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ComputeLaplacianInPlace<<<gridBlock,threadBlock>>>(d,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cb9f7c6e31dd0b390fc7fd657cffd5682233f31d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#ifdef NVGRAPH_PARTITION
//#ifdef DEBUG
#include "include/kmeans.hxx"
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/random.h>
#include <thrust/gather.h>
#include "include/nvgraph_vector.hxx"
#include "include/nvgraph_cublas.hxx"
#include "include/atomics.hxx"
#include "include/sm_utils.h"
#include "include/debug_macros.h"
using namespace nvgraph;
// =========================================================
// Useful macros
// =========================================================
#define BLOCK_SIZE 1024
#define WARP_SIZE 32
#define BSIZE_DIV_WSIZE (BLOCK_SIZE/WARP_SIZE)
// Get index of matrix entry
#define IDX(i,j,lda) ((i)+(j)*(lda))
namespace {
// =========================================================
// CUDA kernels
// =========================================================
/// Compute distances between observation vectors and centroids
/** Block dimensions should be (warpSize, 1,
* blockSize/warpSize). Ideally, the grid is large enough so there
* are d threads in the x-direction, k threads in the y-direction,
* and n threads in the z-direction.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, d*n entries) Observation matrix. Matrix is
* stored column-major and each column is an observation
* vector. Matrix dimensions are d x n.
* @param centroids (Input, d*k entries) Centroid matrix. Matrix is
* stored column-major and each column is a centroid. Matrix
* dimensions are d x k.
* @param dists (Output, n*k entries) Distance matrix. Matrix is
* stored column-major and the (i,j)-entry is the square of the
* Euclidean distance between the ith observation vector and jth
* centroid. Matrix dimensions are n x k. Entries must be
* initialized to zero.
*/
template <typename IndexType_, typename ValueType_>
static __global__
void computeDistances(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
const ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ dists) {
// Loop index
IndexType_ i;
// Block indices
IndexType_ bidx;
// Global indices
IndexType_ gidx, gidy, gidz;
// Private memory
ValueType_ centroid_private, dist_private;
// Global x-index indicates index of vector entry
bidx = blockIdx.x;
while(bidx*blockDim.x < d) {
gidx = threadIdx.x + bidx*blockDim.x;
// Global y-index indicates centroid
gidy = threadIdx.y + blockIdx.y*blockDim.y;
while(gidy < k) {
// Load centroid coordinate from global memory
centroid_private
= (gidx < d) ? centroids[IDX(gidx,gidy,d)] : 0;
// Global z-index indicates observation vector
gidz = threadIdx.z + blockIdx.z*blockDim.z;
while(gidz < n) {
// Load observation vector coordinate from global memory
dist_private
= (gidx < d) ? obs[IDX(gidx,gidz,d)] : 0;
// Compute contribution of current entry to distance
dist_private = centroid_private - dist_private;
dist_private = dist_private*dist_private;
// Perform reduction on warp
for(i=WARP_SIZE/2; i>0; i/=2)
dist_private += utils::shfl_down(dist_private, i, 2*i);
// Write result to global memory
if(threadIdx.x == 0)
atomicFPAdd(dists+IDX(gidz,gidy,n), dist_private);
// Move to another observation vector
gidz += blockDim.z*gridDim.z;
}
// Move to another centroid
gidy += blockDim.y*gridDim.y;
}
// Move to another vector entry
bidx += gridDim.x;
}
}
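    // Example launch of computeDistances (a sketch mirroring the configuration used in assignCentroids() below;
    // dists must be zeroed beforehand because the kernel accumulates with atomicFPAdd):
    //   dim3 blockDim(WARP_SIZE, 1, BSIZE_DIV_WSIZE);
    //   dim3 gridDim(min((d + WARP_SIZE - 1) / WARP_SIZE, 65535),
    //                min(k, 65535),
    //                min((n + BSIZE_DIV_WSIZE - 1) / BSIZE_DIV_WSIZE, 65535));
    //   hipLaunchKernelGGL(computeDistances, gridDim, blockDim, 0, 0, n, d, k, obs, centroids, dists);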
/// Find closest centroid to observation vectors
/** Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
*
* @param n Number of observation vectors.
* @param k Number of clusters.
* @param centroids (Input, d*k entries) Centroid matrix. Matrix is
* stored column-major and each column is a centroid. Matrix
* dimensions are d x k.
* @param dists (Input/output, n*k entries) Distance matrix. Matrix
* is stored column-major and the (i,j)-entry is the square of
* the Euclidean distance between the ith observation vector and
* jth centroid. Matrix dimensions are n x k. On exit, the first
* n entries give the square of the Euclidean distance between
* observation vectors and closest centroids.
* @param codes (Output, n entries) Cluster assignments.
* @param clusterSizes (Output, k entries) Number of points in each
* cluster. Entries must be initialized to zero.
*/
template <typename IndexType_, typename ValueType_>
static __global__
void minDistances(IndexType_ n, IndexType_ k,
ValueType_ * __restrict__ dists,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes) {
// Loop index
IndexType_ i, j;
// Current matrix entry
ValueType_ dist_curr;
// Smallest entry in row
ValueType_ dist_min;
IndexType_ code_min;
// Each row in observation matrix is processed by a thread
i = threadIdx.x + blockIdx.x*blockDim.x;
while(i<n) {
// Find minimum entry in row
code_min = 0;
dist_min = dists[IDX(i,0,n)];
for(j=1; j<k; ++j) {
dist_curr = dists[IDX(i,j,n)];
code_min = (dist_curr<dist_min) ? j : code_min;
dist_min = (dist_curr<dist_min) ? dist_curr : dist_min;
}
// Transfer result to global memory
dists[i] = dist_min;
codes[i] = code_min;
// Increment cluster sizes
atomicAdd(clusterSizes+code_min, 1);
// Move to another row
i += blockDim.x*gridDim.x;
}
}
/// Check if newly computed distances are smaller than old distances
/** Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
*
* @param n Number of observation vectors.
* @param dists_old (Input/output, n entries) Distances between
* observation vectors and closest centroids. On exit, entries
* are replaced by entries in 'dists_new' if the corresponding
* observation vectors are closest to the new centroid.
* @param dists_new (Input, n entries) Distance between observation
* vectors and new centroid.
* @param codes_old (Input/output, n entries) Cluster
* assignments. On exit, entries are replaced with 'code_new' if
* the corresponding observation vectors are closest to the new
* centroid.
* @param code_new Index associated with new centroid.
*/
template <typename IndexType_, typename ValueType_>
static __global__
void minDistances2(IndexType_ n,
ValueType_ * __restrict__ dists_old,
const ValueType_ * __restrict__ dists_new,
IndexType_ * __restrict__ codes_old,
IndexType_ code_new) {
// Loop index
IndexType_ i;
// Distances
ValueType_ dist_old_private;
ValueType_ dist_new_private;
// Each row is processed by a thread
i = threadIdx.x + blockIdx.x*blockDim.x;
while(i<n) {
// Get old and new distances
dist_old_private = dists_old[i];
dist_new_private = dists_new[i];
// Update if new distance is smaller than old distance
if(dist_new_private < dist_old_private) {
dists_old[i] = dist_new_private;
codes_old[i] = code_new;
}
// Move to another row
i += blockDim.x*gridDim.x;
}
}
/// Compute size of k-means clusters
/** Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
*
* @param n Number of observation vectors.
* @param k Number of clusters.
* @param codes (Input, n entries) Cluster assignments.
* @param clusterSizes (Output, k entries) Number of points in each
* cluster. Entries must be initialized to zero.
*/
template <typename IndexType_> static __global__
void computeClusterSizes(IndexType_ n, IndexType_ k,
const IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes) {
IndexType_ i = threadIdx.x + blockIdx.x*blockDim.x;
while(i<n) {
atomicAdd(clusterSizes+codes[i], 1);
i += blockDim.x*gridDim.x;
}
}
/// Divide rows of centroid matrix by cluster sizes
/** Divides the ith column of the sum matrix by the size of the ith
* cluster. If the sum matrix has been initialized so that the ith
* row is the sum of all observation vectors in the ith cluster,
* this kernel produces cluster centroids. The grid and block
* dimensions should be 2-dimensional. Ideally the grid is large
* enough so there are d threads in the x-direction and k threads
* in the y-direction.
*
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param clusterSizes (Input, k entries) Number of points in each
* cluster.
* @param centroids (Input/output, d*k entries) Sum matrix. Matrix
* is stored column-major and matrix dimensions are d x k. The
* ith column is the sum of all observation vectors in the ith
* cluster. On exit, the matrix is the centroid matrix (each
* column is the mean position of a cluster).
*/
template <typename IndexType_, typename ValueType_>
static __global__
void divideCentroids(IndexType_ d, IndexType_ k,
const IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ centroids) {
// Global indices
IndexType_ gidx, gidy;
// Current cluster size
IndexType_ clusterSize_private;
// Observation vector is determined by global y-index
gidy = threadIdx.y + blockIdx.y*blockDim.y;
while(gidy < k) {
// Get cluster size from global memory
clusterSize_private = clusterSizes[gidy];
// Add vector entries to centroid matrix
// Vector entris are determined by global x-index
gidx = threadIdx.x + blockIdx.x*blockDim.x;
while(gidx < d) {
centroids[IDX(gidx,gidy,d)] /= clusterSize_private;
gidx += blockDim.x*gridDim.x;
}
// Move to another centroid
gidy += blockDim.y*gridDim.y;
}
}
// =========================================================
// Helper functions
// =========================================================
/// Randomly choose new centroids
/** Centroid is randomly chosen with k-means++ algorithm.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param rand Random number drawn uniformly from [0,1).
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
 *    observation vector. Matrix dimensions are d x n.
* @param dists (Input, device memory, 2*n entries) Workspace. The
* first n entries should be the distance between observation
* vectors and the closest centroid.
* @param centroid (Output, device memory, d entries) Centroid
* coordinates.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int chooseNewCentroid(IndexType_ n, IndexType_ d, IndexType_ k,
ValueType_ rand,
const ValueType_ * __restrict__ obs,
ValueType_ * __restrict__ dists,
ValueType_ * __restrict__ centroid) {
using namespace thrust;
// Cumulative sum of distances
ValueType_ * distsCumSum = dists + n;
// Residual sum of squares
ValueType_ distsSum;
// Observation vector that is chosen as new centroid
IndexType_ obsIndex;
// Compute cumulative sum of distances
inclusive_scan(device_pointer_cast(dists),
device_pointer_cast(dists+n),
device_pointer_cast(distsCumSum));
cudaCheckError();
CHECK_CUDA(hipMemcpy(&distsSum, distsCumSum+n-1,
sizeof(ValueType_),
hipMemcpyDeviceToHost));
// Randomly choose observation vector
// Probabilities are proportional to square of distance to closest
// centroid (see k-means++ algorithm)
obsIndex = (lower_bound(device_pointer_cast(distsCumSum),
device_pointer_cast(distsCumSum+n),
distsSum*rand)
- device_pointer_cast(distsCumSum));
cudaCheckError();
obsIndex = max(obsIndex, 0);
obsIndex = min(obsIndex, n-1);
// Record new centroid position
CHECK_CUDA(hipMemcpyAsync(centroid, obs+IDX(0,obsIndex,d),
d*sizeof(ValueType_),
hipMemcpyDeviceToDevice));
return 0;
}
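  // Worked example of the sampling above (illustrative numbers): with dists = {1, 4, 4} the inclusive scan
  // gives distsCumSum = {1, 5, 9} and distsSum = 9; lower_bound on distsCumSum with the value 9*rand then
  // selects observation 0 with probability 1/9 and observations 1 and 2 with probability 4/9 each, i.e.
  // proportional to the squared distance to the closest centroid, as required by k-means++.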
/// Choose initial cluster centroids for k-means algorithm
/** Centroids are randomly chosen with k-means++ algorithm
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param dists (Output, device memory, 2*n entries) Workspace. On
* exit, the first n entries give the square of the Euclidean
* distance between observation vectors and the closest centroid.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int initializeCentroids(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
ValueType_ * __restrict__ centroids,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ dists) {
// -------------------------------------------------------
// Variable declarations
// -------------------------------------------------------
// Loop index
IndexType_ i;
// CUDA grid dimensions
dim3 blockDim_warp, gridDim_warp, gridDim_block;
// Random number generator
thrust::default_random_engine rng(123456);
thrust::uniform_real_distribution<ValueType_> uniformDist(0,1);
// -------------------------------------------------------
// Implementation
// -------------------------------------------------------
// Initialize grid dimensions
blockDim_warp.x = WARP_SIZE;
blockDim_warp.y = 1;
blockDim_warp.z = BSIZE_DIV_WSIZE;
gridDim_warp.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim_warp.y = 1;
gridDim_warp.z
= min((n+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535);
gridDim_block.x = min((n+BLOCK_SIZE-1)/BLOCK_SIZE, 65535);
gridDim_block.y = 1;
gridDim_block.z = 1;
// Assign observation vectors to code 0
CHECK_CUDA(hipMemsetAsync(codes, 0, n*sizeof(IndexType_)));
// Choose first centroid
thrust::fill(thrust::device_pointer_cast(dists),
thrust::device_pointer_cast(dists+n), 1);
cudaCheckError();
if(chooseNewCentroid(n, d, k, uniformDist(rng), obs, dists, centroids))
WARNING("error in k-means++ (could not pick centroid)");
// Compute distances from first centroid
CHECK_CUDA(hipMemsetAsync(dists, 0, n*sizeof(ValueType_)));
hipLaunchKernelGGL(( computeDistances) , dim3(gridDim_warp), dim3(blockDim_warp) , 0, 0,
n, d, 1, obs, centroids, dists);
cudaCheckError()
// Choose remaining centroids
for(i=1; i<k; ++i) {
// Choose ith centroid
if(chooseNewCentroid(n, d, k, uniformDist(rng),obs, dists, centroids+IDX(0,i,d)))
WARNING("error in k-means++ (could not pick centroid)");
// Compute distances from ith centroid
CHECK_CUDA(hipMemsetAsync(dists+n, 0, n*sizeof(ValueType_)));
hipLaunchKernelGGL(( computeDistances) , dim3(gridDim_warp), dim3(blockDim_warp) , 0, 0,
n, d, 1, obs, centroids+IDX(0,i,d), dists+n);
cudaCheckError();
// Recompute minimum distances
hipLaunchKernelGGL(( minDistances2) , dim3(gridDim_block), dim3(BLOCK_SIZE) , 0, 0,
n, dists, dists+n, codes, i);
cudaCheckError();
}
// Compute cluster sizes
CHECK_CUDA(hipMemsetAsync(clusterSizes, 0, k*sizeof(IndexType_)));
hipLaunchKernelGGL(( computeClusterSizes) , dim3(gridDim_block), dim3(BLOCK_SIZE) , 0, 0,
n, k, codes, clusterSizes);
cudaCheckError();
return 0;
}
/// Find cluster centroids closest to observation vectors
/** Distance is measured with Euclidean norm.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param centroids (Input, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param dists (Output, device memory, n*k entries) Workspace. On
* exit, the first n entries give the square of the Euclidean
* distance between observation vectors and the closest centroid.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param residual_host (Output, host memory, 1 entry) Residual sum
* of squares of assignment.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int assignCentroids(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
const ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ dists,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes,
ValueType_ * residual_host) {
// CUDA grid dimensions
dim3 blockDim, gridDim;
// Compute distance between centroids and observation vectors
CHECK_CUDA(hipMemsetAsync(dists, 0, n*k*sizeof(ValueType_)));
blockDim.x = WARP_SIZE;
blockDim.y = 1;
blockDim.z = BLOCK_SIZE/WARP_SIZE;
gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim.y = min(k, 65535);
gridDim.z = min((n+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535);
hipLaunchKernelGGL(( computeDistances) , dim3(gridDim), dim3(blockDim) , 0, 0, n, d, k,
obs, centroids,
dists);
cudaCheckError();
// Find centroid closest to each observation vector
CHECK_CUDA(hipMemsetAsync(clusterSizes,0,k*sizeof(IndexType_)));
blockDim.x = BLOCK_SIZE;
blockDim.y = 1;
blockDim.z = 1;
gridDim.x = min((n+BLOCK_SIZE-1)/BLOCK_SIZE, 65535);
gridDim.y = 1;
gridDim.z = 1;
hipLaunchKernelGGL(( minDistances) , dim3(gridDim), dim3(blockDim) , 0, 0, n, k, dists, codes,
clusterSizes);
cudaCheckError();
// Compute residual sum of squares
*residual_host
= thrust::reduce(thrust::device_pointer_cast(dists),
thrust::device_pointer_cast(dists+n));
return 0;
}
/// Update cluster centroids for k-means algorithm
/** All clusters are assumed to be non-empty.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Input, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Input, device memory, k entries) Number of
* points in each cluster.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param work (Output, device memory, n*d entries) Workspace.
* @param work_int (Output, device memory, 2*d*n entries)
* Workspace.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int updateCentroids(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
const IndexType_ * __restrict__ codes,
const IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ work,
IndexType_ * __restrict__ work_int) {
using namespace thrust;
// -------------------------------------------------------
// Variable declarations
// -------------------------------------------------------
// Useful constants
const ValueType_ one = 1;
const ValueType_ zero = 0;
// CUDA grid dimensions
dim3 blockDim, gridDim;
// Device memory
device_ptr<ValueType_> obs_copy(work);
device_ptr<IndexType_> codes_copy(work_int);
device_ptr<IndexType_> rows(work_int+d*n);
// Take transpose of observation matrix
Cublas::geam(true, false, n, d,
&one, obs, d, &zero, (ValueType_*) NULL, n,
raw_pointer_cast(obs_copy), n);
// Cluster assigned to each observation matrix entry
sequence(rows, rows+d*n);
cudaCheckError();
transform(rows, rows+d*n, make_constant_iterator<IndexType_>(n),
rows, modulus<IndexType_>());
cudaCheckError();
gather(rows, rows+d*n, device_pointer_cast(codes), codes_copy);
cudaCheckError();
// Row associated with each observation matrix entry
sequence(rows, rows+d*n);
cudaCheckError();
transform(rows, rows+d*n, make_constant_iterator<IndexType_>(n),
rows, divides<IndexType_>());
cudaCheckError();
// Sort and reduce to add observation vectors in same cluster
stable_sort_by_key(codes_copy, codes_copy+d*n,
make_zip_iterator(make_tuple(obs_copy, rows)));
cudaCheckError();
reduce_by_key(rows, rows+d*n, obs_copy,
codes_copy, // Output to codes_copy is ignored
device_pointer_cast(centroids));
cudaCheckError();
// Divide sums by cluster size to get centroid matrix
blockDim.x = WARP_SIZE;
blockDim.y = BLOCK_SIZE/WARP_SIZE;
blockDim.z = 1;
gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim.y = min((k+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535);
gridDim.z = 1;
hipLaunchKernelGGL(( divideCentroids) , dim3(gridDim), dim3(blockDim) , 0, 0, d, k, clusterSizes,
centroids);
cudaCheckError();
return 0;
}
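  // How the update above works: obs is transposed so its d*n entries form a flat array, each entry is tagged
  // with the cluster of its observation (codes_copy) and with its coordinate index (rows); the stable sort
  // groups entries by cluster while preserving coordinate order, reduce_by_key then sums entries per
  // (cluster, coordinate) pair directly into the d x k centroid buffer, and divideCentroids turns those sums
  // into means.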
}
namespace nvgraph {
// =========================================================
// k-means algorithm
// =========================================================
/// Find clusters with k-means algorithm
/** Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param work (Output, device memory, n*max(k,d) entries)
* Workspace.
* @param work_int (Output, device memory, 2*d*n entries)
* Workspace.
* @param residual_host (Output, host memory, 1 entry) Residual sum
* of squares (sum of squares of distances between observation
* vectors and centroids).
* @param iters_host (Output, host memory, 1 entry) Number of
* k-means iterations.
* @return NVGRAPH error flag.
*/
template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k,
ValueType_ tol, IndexType_ maxiter,
const ValueType_ * __restrict__ obs,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ work,
IndexType_ * __restrict__ work_int,
ValueType_ * residual_host,
IndexType_ * iters_host) {
// -------------------------------------------------------
// Variable declarations
// -------------------------------------------------------
// Current iteration
IndexType_ iter;
// Residual sum of squares at previous iteration
ValueType_ residualPrev = 0;
// Random number generator
thrust::default_random_engine rng(123456);
thrust::uniform_real_distribution<ValueType_> uniformDist(0,1);
// -------------------------------------------------------
// Initialization
// -------------------------------------------------------
// Check that parameters are valid
if(n < 1) {
WARNING("invalid parameter (n<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(d < 1) {
WARNING("invalid parameter (d<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(k < 1) {
WARNING("invalid parameter (k<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(tol < 0) {
WARNING("invalid parameter (tol<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(maxiter < 0) {
WARNING("invalid parameter (maxiter<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
// Trivial cases
if(k == 1) {
CHECK_CUDA(hipMemsetAsync(codes, 0, n*sizeof(IndexType_)));
CHECK_CUDA(hipMemcpyAsync(clusterSizes, &n, sizeof(IndexType_),
hipMemcpyHostToDevice));
if(updateCentroids(n, d, k, obs, codes,
clusterSizes, centroids,
work, work_int))
WARNING("could not compute k-means centroids");
dim3 blockDim, gridDim;
blockDim.x = WARP_SIZE;
blockDim.y = 1;
blockDim.z = BLOCK_SIZE/WARP_SIZE;
gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim.y = 1;
gridDim.z = min((n+BLOCK_SIZE/WARP_SIZE-1)/(BLOCK_SIZE/WARP_SIZE), 65535);
CHECK_CUDA(hipMemsetAsync(work, 0, n*k*sizeof(ValueType_)));
hipLaunchKernelGGL(( computeDistances) , dim3(gridDim), dim3(blockDim) , 0, 0, n, d, 1,
obs,
centroids,
work);
cudaCheckError();
*residual_host = thrust::reduce(thrust::device_pointer_cast(work),
thrust::device_pointer_cast(work+n));
cudaCheckError();
return NVGRAPH_OK;
}
if(n <= k) {
thrust::sequence(thrust::device_pointer_cast(codes),
thrust::device_pointer_cast(codes+n));
cudaCheckError();
thrust::fill_n(thrust::device_pointer_cast(clusterSizes), n, 1);
cudaCheckError();
if(n < k)
CHECK_CUDA(hipMemsetAsync(clusterSizes+n, 0, (k-n)*sizeof(IndexType_)));
CHECK_CUDA(hipMemcpyAsync(centroids, obs, d*n*sizeof(ValueType_),
hipMemcpyDeviceToDevice));
*residual_host = 0;
return NVGRAPH_OK;
}
// Initialize cuBLAS
Cublas::set_pointer_mode_host();
// -------------------------------------------------------
// k-means++ algorithm
// -------------------------------------------------------
// Choose initial cluster centroids
if(initializeCentroids(n, d, k, obs, centroids, codes,
clusterSizes, work))
WARNING("could not initialize k-means centroids");
// Apply k-means iteration until convergence
for(iter=0; iter<maxiter; ++iter) {
// Update cluster centroids
if(updateCentroids(n, d, k, obs, codes,
clusterSizes, centroids,
work, work_int)) WARNING("could not update k-means centroids");
// Determine centroid closest to each observation
residualPrev = *residual_host;
if(assignCentroids(n, d, k, obs, centroids, work,
codes, clusterSizes, residual_host))
WARNING("could not assign observation vectors to k-means clusters");
// Reinitialize empty clusters with new centroids
IndexType_ emptyCentroid = (thrust::find(thrust::device_pointer_cast(clusterSizes),
thrust::device_pointer_cast(clusterSizes+k), 0) - thrust::device_pointer_cast(clusterSizes));
while(emptyCentroid < k) {
if(chooseNewCentroid(n, d, k, uniformDist(rng), obs, work, centroids+IDX(0,emptyCentroid,d)))
WARNING("could not replace empty centroid");
if(assignCentroids(n, d, k, obs, centroids, work, codes, clusterSizes, residual_host))
WARNING("could not assign observation vectors to k-means clusters");
emptyCentroid = (thrust::find(thrust::device_pointer_cast(clusterSizes),
thrust::device_pointer_cast(clusterSizes+k), 0) - thrust::device_pointer_cast(clusterSizes));
cudaCheckError();
}
// Check for convergence
if(fabs(residualPrev-(*residual_host))/n < tol) {
++iter;
break;
}
}
// Warning if k-means has failed to converge
if(fabs(residualPrev-(*residual_host))/n >= tol)
WARNING("k-means failed to converge");
*iters_host = iter;
return NVGRAPH_OK;
}
/// Find clusters with k-means algorithm
/** Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
*
* CNMEM must be initialized before calling this function.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param residual On exit, residual sum of squares (sum of squares
* of distances between observation vectors and centroids).
 *  @param iters On exit, number of k-means iterations.
* @return NVGRAPH error flag
*/
template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k,
ValueType_ tol, IndexType_ maxiter,
const ValueType_ * __restrict__ obs,
IndexType_ * __restrict__ codes,
ValueType_ & residual,
IndexType_ & iters) {
// Check that parameters are valid
if(n < 1) {
WARNING("invalid parameter (n<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(d < 1) {
WARNING("invalid parameter (d<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(k < 1) {
WARNING("invalid parameter (k<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(tol < 0) {
WARNING("invalid parameter (tol<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(maxiter < 0) {
WARNING("invalid parameter (maxiter<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
// Allocate memory
// TODO: handle non-zero CUDA streams
hipStream_t stream = 0;
Vector<IndexType_> clusterSizes(k, stream);
Vector<ValueType_> centroids(d*k, stream);
Vector<ValueType_> work(n*max(k,d), stream);
Vector<IndexType_> work_int(2*d*n, stream);
// Perform k-means
return kmeans<IndexType_,ValueType_>(n, d, k, tol, maxiter,
obs, codes,
clusterSizes.raw(),
centroids.raw(),
work.raw(), work_int.raw(),
&residual, &iters);
}
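// Minimal usage sketch (hypothetical buffer names; d_obs is a device-side d x n column-major observation
// matrix and d_codes a device buffer of n ints, matching the <int, float> instantiation below):
//   float residual = 0; int iters = 0;
//   NVGRAPH_ERROR status = nvgraph::kmeans<int, float>(n, d, k, 1e-4f, 100, d_obs, d_codes, residual, iters);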
// =========================================================
// Explicit instantiations
// =========================================================
template
NVGRAPH_ERROR kmeans<int, float>(int n, int d, int k,
float tol, int maxiter,
const float * __restrict__ obs,
int * __restrict__ codes,
float & residual,
int & iters);
template
NVGRAPH_ERROR kmeans<int, double>(int n, int d, int k,
double tol, int maxiter,
const double * __restrict__ obs,
int * __restrict__ codes,
double & residual,
int & iters);
}
//#endif //NVGRAPH_PARTITION
//#endif //debug
| cb9f7c6e31dd0b390fc7fd657cffd5682233f31d.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#ifdef NVGRAPH_PARTITION
//#ifdef DEBUG
#include "include/kmeans.hxx"
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/random.h>
#include <thrust/gather.h>
#include "include/nvgraph_vector.hxx"
#include "include/nvgraph_cublas.hxx"
#include "include/atomics.hxx"
#include "include/sm_utils.h"
#include "include/debug_macros.h"
using namespace nvgraph;
// =========================================================
// Useful macros
// =========================================================
#define BLOCK_SIZE 1024
#define WARP_SIZE 32
#define BSIZE_DIV_WSIZE (BLOCK_SIZE/WARP_SIZE)
// Get index of matrix entry
#define IDX(i,j,lda) ((i)+(j)*(lda))
namespace {
// =========================================================
// CUDA kernels
// =========================================================
/// Compute distances between observation vectors and centroids
/** Block dimensions should be (warpSize, 1,
* blockSize/warpSize). Ideally, the grid is large enough so there
* are d threads in the x-direction, k threads in the y-direction,
* and n threads in the z-direction.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, d*n entries) Observation matrix. Matrix is
* stored column-major and each column is an observation
* vector. Matrix dimensions are d x n.
* @param centroids (Input, d*k entries) Centroid matrix. Matrix is
* stored column-major and each column is a centroid. Matrix
* dimensions are d x k.
* @param dists (Output, n*k entries) Distance matrix. Matrix is
* stored column-major and the (i,j)-entry is the square of the
* Euclidean distance between the ith observation vector and jth
* centroid. Matrix dimensions are n x k. Entries must be
* initialized to zero.
*/
template <typename IndexType_, typename ValueType_>
static __global__
void computeDistances(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
const ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ dists) {
// Loop index
IndexType_ i;
// Block indices
IndexType_ bidx;
// Global indices
IndexType_ gidx, gidy, gidz;
// Private memory
ValueType_ centroid_private, dist_private;
// Global x-index indicates index of vector entry
bidx = blockIdx.x;
while(bidx*blockDim.x < d) {
gidx = threadIdx.x + bidx*blockDim.x;
// Global y-index indicates centroid
gidy = threadIdx.y + blockIdx.y*blockDim.y;
while(gidy < k) {
// Load centroid coordinate from global memory
centroid_private
= (gidx < d) ? centroids[IDX(gidx,gidy,d)] : 0;
// Global z-index indicates observation vector
gidz = threadIdx.z + blockIdx.z*blockDim.z;
while(gidz < n) {
// Load observation vector coordinate from global memory
dist_private
= (gidx < d) ? obs[IDX(gidx,gidz,d)] : 0;
// Compute contribution of current entry to distance
dist_private = centroid_private - dist_private;
dist_private = dist_private*dist_private;
// Perform reduction on warp
for(i=WARP_SIZE/2; i>0; i/=2)
dist_private += utils::shfl_down(dist_private, i, 2*i);
// Write result to global memory
if(threadIdx.x == 0)
atomicFPAdd(dists+IDX(gidz,gidy,n), dist_private);
// Move to another observation vector
gidz += blockDim.z*gridDim.z;
}
// Move to another centroid
gidy += blockDim.y*gridDim.y;
}
// Move to another vector entry
bidx += gridDim.x;
}
}
/// Find closest centroid to observation vectors
/** Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
*
* @param n Number of observation vectors.
* @param k Number of clusters.
* @param centroids (Input, d*k entries) Centroid matrix. Matrix is
* stored column-major and each column is a centroid. Matrix
* dimensions are d x k.
* @param dists (Input/output, n*k entries) Distance matrix. Matrix
* is stored column-major and the (i,j)-entry is the square of
* the Euclidean distance between the ith observation vector and
* jth centroid. Matrix dimensions are n x k. On exit, the first
* n entries give the square of the Euclidean distance between
* observation vectors and closest centroids.
* @param codes (Output, n entries) Cluster assignments.
* @param clusterSizes (Output, k entries) Number of points in each
* cluster. Entries must be initialized to zero.
*/
template <typename IndexType_, typename ValueType_>
static __global__
void minDistances(IndexType_ n, IndexType_ k,
ValueType_ * __restrict__ dists,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes) {
// Loop index
IndexType_ i, j;
// Current matrix entry
ValueType_ dist_curr;
// Smallest entry in row
ValueType_ dist_min;
IndexType_ code_min;
// Each row in observation matrix is processed by a thread
i = threadIdx.x + blockIdx.x*blockDim.x;
while(i<n) {
// Find minimum entry in row
code_min = 0;
dist_min = dists[IDX(i,0,n)];
for(j=1; j<k; ++j) {
dist_curr = dists[IDX(i,j,n)];
code_min = (dist_curr<dist_min) ? j : code_min;
dist_min = (dist_curr<dist_min) ? dist_curr : dist_min;
}
// Transfer result to global memory
dists[i] = dist_min;
codes[i] = code_min;
// Increment cluster sizes
atomicAdd(clusterSizes+code_min, 1);
// Move to another row
i += blockDim.x*gridDim.x;
}
}
/// Check if newly computed distances are smaller than old distances
/** Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
*
* @param n Number of observation vectors.
* @param dists_old (Input/output, n entries) Distances between
* observation vectors and closest centroids. On exit, entries
* are replaced by entries in 'dists_new' if the corresponding
* observation vectors are closest to the new centroid.
* @param dists_new (Input, n entries) Distance between observation
* vectors and new centroid.
* @param codes_old (Input/output, n entries) Cluster
* assignments. On exit, entries are replaced with 'code_new' if
* the corresponding observation vectors are closest to the new
* centroid.
* @param code_new Index associated with new centroid.
*/
template <typename IndexType_, typename ValueType_>
static __global__
void minDistances2(IndexType_ n,
ValueType_ * __restrict__ dists_old,
const ValueType_ * __restrict__ dists_new,
IndexType_ * __restrict__ codes_old,
IndexType_ code_new) {
// Loop index
IndexType_ i;
// Distances
ValueType_ dist_old_private;
ValueType_ dist_new_private;
// Each row is processed by a thread
i = threadIdx.x + blockIdx.x*blockDim.x;
while(i<n) {
// Get old and new distances
dist_old_private = dists_old[i];
dist_new_private = dists_new[i];
// Update if new distance is smaller than old distance
if(dist_new_private < dist_old_private) {
dists_old[i] = dist_new_private;
codes_old[i] = code_new;
}
// Move to another row
i += blockDim.x*gridDim.x;
}
}
/// Compute size of k-means clusters
/** Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
*
* @param n Number of observation vectors.
* @param k Number of clusters.
* @param codes (Input, n entries) Cluster assignments.
* @param clusterSizes (Output, k entries) Number of points in each
* cluster. Entries must be initialized to zero.
*/
template <typename IndexType_> static __global__
void computeClusterSizes(IndexType_ n, IndexType_ k,
const IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes) {
IndexType_ i = threadIdx.x + blockIdx.x*blockDim.x;
while(i<n) {
atomicAdd(clusterSizes+codes[i], 1);
i += blockDim.x*gridDim.x;
}
}
 /// Divide columns of centroid matrix by cluster sizes
/** Divides the ith column of the sum matrix by the size of the ith
* cluster. If the sum matrix has been initialized so that the ith
 *    column is the sum of all observation vectors in the ith cluster,
* this kernel produces cluster centroids. The grid and block
* dimensions should be 2-dimensional. Ideally the grid is large
* enough so there are d threads in the x-direction and k threads
* in the y-direction.
*
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param clusterSizes (Input, k entries) Number of points in each
* cluster.
* @param centroids (Input/output, d*k entries) Sum matrix. Matrix
* is stored column-major and matrix dimensions are d x k. The
* ith column is the sum of all observation vectors in the ith
* cluster. On exit, the matrix is the centroid matrix (each
* column is the mean position of a cluster).
*/
template <typename IndexType_, typename ValueType_>
static __global__
void divideCentroids(IndexType_ d, IndexType_ k,
const IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ centroids) {
// Global indices
IndexType_ gidx, gidy;
// Current cluster size
IndexType_ clusterSize_private;
// Observation vector is determined by global y-index
gidy = threadIdx.y + blockIdx.y*blockDim.y;
while(gidy < k) {
// Get cluster size from global memory
clusterSize_private = clusterSizes[gidy];
// Add vector entries to centroid matrix
 // Vector entries are determined by global x-index
gidx = threadIdx.x + blockIdx.x*blockDim.x;
while(gidx < d) {
centroids[IDX(gidx,gidy,d)] /= clusterSize_private;
gidx += blockDim.x*gridDim.x;
}
// Move to another centroid
gidy += blockDim.y*gridDim.y;
}
}
// =========================================================
// Helper functions
// =========================================================
/// Randomly choose new centroids
/** Centroid is randomly chosen with k-means++ algorithm.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param rand Random number drawn uniformly from [0,1).
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
 *    observation vector. Matrix dimensions are d x n.
* @param dists (Input, device memory, 2*n entries) Workspace. The
 *    first n entries should be the square of the distance between
 *    observation vectors and the closest centroid.
* @param centroid (Output, device memory, d entries) Centroid
* coordinates.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int chooseNewCentroid(IndexType_ n, IndexType_ d, IndexType_ k,
ValueType_ rand,
const ValueType_ * __restrict__ obs,
ValueType_ * __restrict__ dists,
ValueType_ * __restrict__ centroid) {
using namespace thrust;
// Cumulative sum of distances
ValueType_ * distsCumSum = dists + n;
// Residual sum of squares
ValueType_ distsSum;
// Observation vector that is chosen as new centroid
IndexType_ obsIndex;
// Compute cumulative sum of distances
inclusive_scan(device_pointer_cast(dists),
device_pointer_cast(dists+n),
device_pointer_cast(distsCumSum));
cudaCheckError();
CHECK_CUDA(cudaMemcpy(&distsSum, distsCumSum+n-1,
sizeof(ValueType_),
cudaMemcpyDeviceToHost));
// Randomly choose observation vector
// Probabilities are proportional to square of distance to closest
// centroid (see k-means++ algorithm)
obsIndex = (lower_bound(device_pointer_cast(distsCumSum),
device_pointer_cast(distsCumSum+n),
distsSum*rand)
- device_pointer_cast(distsCumSum));
cudaCheckError();
obsIndex = max(obsIndex, 0);
obsIndex = min(obsIndex, n-1);
// Record new centroid position
CHECK_CUDA(cudaMemcpyAsync(centroid, obs+IDX(0,obsIndex,d),
d*sizeof(ValueType_),
cudaMemcpyDeviceToDevice));
return 0;
}
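// Worked example (added, not in the original source): the scan/lower_bound pair
// above implements inverse-CDF sampling. With n = 4 and dists = {1, 3, 2, 4},
// the inclusive scan gives distsCumSum = {1, 4, 6, 10} and distsSum = 10. A
// uniform draw rand = 0.45 gives distsSum*rand = 4.5, lower_bound returns
// index 2, and observation 2 is selected -- i.e. with probability 2/10,
// proportional to its (squared) distance, as k-means++ requires.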
/// Choose initial cluster centroids for k-means algorithm
/** Centroids are randomly chosen with k-means++ algorithm
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param dists (Output, device memory, 2*n entries) Workspace. On
* exit, the first n entries give the square of the Euclidean
* distance between observation vectors and the closest centroid.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int initializeCentroids(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
ValueType_ * __restrict__ centroids,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ dists) {
// -------------------------------------------------------
// Variable declarations
// -------------------------------------------------------
// Loop index
IndexType_ i;
// CUDA grid dimensions
dim3 blockDim_warp, gridDim_warp, gridDim_block;
// Random number generator
thrust::default_random_engine rng(123456);
thrust::uniform_real_distribution<ValueType_> uniformDist(0,1);
// -------------------------------------------------------
// Implementation
// -------------------------------------------------------
// Initialize grid dimensions
blockDim_warp.x = WARP_SIZE;
blockDim_warp.y = 1;
blockDim_warp.z = BSIZE_DIV_WSIZE;
gridDim_warp.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim_warp.y = 1;
gridDim_warp.z
= min((n+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535);
gridDim_block.x = min((n+BLOCK_SIZE-1)/BLOCK_SIZE, 65535);
gridDim_block.y = 1;
gridDim_block.z = 1;
// Assign observation vectors to code 0
CHECK_CUDA(cudaMemsetAsync(codes, 0, n*sizeof(IndexType_)));
// Choose first centroid
thrust::fill(thrust::device_pointer_cast(dists),
thrust::device_pointer_cast(dists+n), 1);
cudaCheckError();
if(chooseNewCentroid(n, d, k, uniformDist(rng), obs, dists, centroids))
WARNING("error in k-means++ (could not pick centroid)");
// Compute distances from first centroid
CHECK_CUDA(cudaMemsetAsync(dists, 0, n*sizeof(ValueType_)));
computeDistances <<< gridDim_warp, blockDim_warp >>>
(n, d, 1, obs, centroids, dists);
 cudaCheckError();
// Choose remaining centroids
for(i=1; i<k; ++i) {
// Choose ith centroid
if(chooseNewCentroid(n, d, k, uniformDist(rng),obs, dists, centroids+IDX(0,i,d)))
WARNING("error in k-means++ (could not pick centroid)");
// Compute distances from ith centroid
CHECK_CUDA(cudaMemsetAsync(dists+n, 0, n*sizeof(ValueType_)));
computeDistances <<< gridDim_warp, blockDim_warp >>>
(n, d, 1, obs, centroids+IDX(0,i,d), dists+n);
cudaCheckError();
// Recompute minimum distances
minDistances2 <<< gridDim_block, BLOCK_SIZE >>>
(n, dists, dists+n, codes, i);
cudaCheckError();
}
// Compute cluster sizes
CHECK_CUDA(cudaMemsetAsync(clusterSizes, 0, k*sizeof(IndexType_)));
computeClusterSizes <<< gridDim_block, BLOCK_SIZE >>>
(n, k, codes, clusterSizes);
cudaCheckError();
return 0;
}
/// Find cluster centroids closest to observation vectors
/** Distance is measured with Euclidean norm.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param centroids (Input, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param dists (Output, device memory, n*k entries) Workspace. On
* exit, the first n entries give the square of the Euclidean
* distance between observation vectors and the closest centroid.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param residual_host (Output, host memory, 1 entry) Residual sum
* of squares of assignment.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int assignCentroids(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
const ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ dists,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes,
ValueType_ * residual_host) {
// CUDA grid dimensions
dim3 blockDim, gridDim;
// Compute distance between centroids and observation vectors
CHECK_CUDA(cudaMemsetAsync(dists, 0, n*k*sizeof(ValueType_)));
blockDim.x = WARP_SIZE;
blockDim.y = 1;
blockDim.z = BLOCK_SIZE/WARP_SIZE;
gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim.y = min(k, 65535);
gridDim.z = min((n+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535);
computeDistances <<< gridDim, blockDim >>> (n, d, k,
obs, centroids,
dists);
cudaCheckError();
// Find centroid closest to each observation vector
CHECK_CUDA(cudaMemsetAsync(clusterSizes,0,k*sizeof(IndexType_)));
blockDim.x = BLOCK_SIZE;
blockDim.y = 1;
blockDim.z = 1;
gridDim.x = min((n+BLOCK_SIZE-1)/BLOCK_SIZE, 65535);
gridDim.y = 1;
gridDim.z = 1;
minDistances <<< gridDim, blockDim >>> (n, k, dists, codes,
clusterSizes);
cudaCheckError();
// Compute residual sum of squares
*residual_host
= thrust::reduce(thrust::device_pointer_cast(dists),
thrust::device_pointer_cast(dists+n));
return 0;
}
/// Update cluster centroids for k-means algorithm
/** All clusters are assumed to be non-empty.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Input, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Input, device memory, k entries) Number of
* points in each cluster.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param work (Output, device memory, n*d entries) Workspace.
* @param work_int (Output, device memory, 2*d*n entries)
* Workspace.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_> static
int updateCentroids(IndexType_ n, IndexType_ d, IndexType_ k,
const ValueType_ * __restrict__ obs,
const IndexType_ * __restrict__ codes,
const IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ work,
IndexType_ * __restrict__ work_int) {
using namespace thrust;
// -------------------------------------------------------
// Variable declarations
// -------------------------------------------------------
// Useful constants
const ValueType_ one = 1;
const ValueType_ zero = 0;
// CUDA grid dimensions
dim3 blockDim, gridDim;
// Device memory
device_ptr<ValueType_> obs_copy(work);
device_ptr<IndexType_> codes_copy(work_int);
device_ptr<IndexType_> rows(work_int+d*n);
// Take transpose of observation matrix
Cublas::geam(true, false, n, d,
&one, obs, d, &zero, (ValueType_*) NULL, n,
raw_pointer_cast(obs_copy), n);
// Cluster assigned to each observation matrix entry
sequence(rows, rows+d*n);
cudaCheckError();
transform(rows, rows+d*n, make_constant_iterator<IndexType_>(n),
rows, modulus<IndexType_>());
cudaCheckError();
gather(rows, rows+d*n, device_pointer_cast(codes), codes_copy);
cudaCheckError();
// Row associated with each observation matrix entry
sequence(rows, rows+d*n);
cudaCheckError();
transform(rows, rows+d*n, make_constant_iterator<IndexType_>(n),
rows, divides<IndexType_>());
cudaCheckError();
// Sort and reduce to add observation vectors in same cluster
stable_sort_by_key(codes_copy, codes_copy+d*n,
make_zip_iterator(make_tuple(obs_copy, rows)));
cudaCheckError();
reduce_by_key(rows, rows+d*n, obs_copy,
codes_copy, // Output to codes_copy is ignored
device_pointer_cast(centroids));
cudaCheckError();
// Divide sums by cluster size to get centroid matrix
blockDim.x = WARP_SIZE;
blockDim.y = BLOCK_SIZE/WARP_SIZE;
blockDim.z = 1;
gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim.y = min((k+BSIZE_DIV_WSIZE-1)/BSIZE_DIV_WSIZE, 65535);
gridDim.z = 1;
divideCentroids <<< gridDim, blockDim >>> (d, k, clusterSizes,
centroids);
cudaCheckError();
return 0;
}
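// Worked example (added, not in the original source): with n = 2 observations,
// d = 2 dimensions and codes = {1, 0}, the gather above produces
// codes_copy = {1, 0, 1, 0} and the second sequence/transform produces
// rows = {0, 0, 1, 1} (the coordinate index of each entry). The stable sort by
// codes_copy groups cluster 0's entries (coordinate 0, then coordinate 1) ahead
// of cluster 1's, so reduce_by_key over the permuted rows emits the per-cluster,
// per-coordinate sums in exactly the column-major d x k layout that
// divideCentroids expects.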
}
namespace nvgraph {
// =========================================================
// k-means algorithm
// =========================================================
/// Find clusters with k-means algorithm
/** Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param work (Output, device memory, n*max(k,d) entries)
* Workspace.
* @param work_int (Output, device memory, 2*d*n entries)
* Workspace.
* @param residual_host (Output, host memory, 1 entry) Residual sum
* of squares (sum of squares of distances between observation
* vectors and centroids).
* @param iters_host (Output, host memory, 1 entry) Number of
* k-means iterations.
* @return NVGRAPH error flag.
*/
template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k,
ValueType_ tol, IndexType_ maxiter,
const ValueType_ * __restrict__ obs,
IndexType_ * __restrict__ codes,
IndexType_ * __restrict__ clusterSizes,
ValueType_ * __restrict__ centroids,
ValueType_ * __restrict__ work,
IndexType_ * __restrict__ work_int,
ValueType_ * residual_host,
IndexType_ * iters_host) {
// -------------------------------------------------------
// Variable declarations
// -------------------------------------------------------
// Current iteration
IndexType_ iter;
// Residual sum of squares at previous iteration
ValueType_ residualPrev = 0;
// Random number generator
thrust::default_random_engine rng(123456);
thrust::uniform_real_distribution<ValueType_> uniformDist(0,1);
// -------------------------------------------------------
// Initialization
// -------------------------------------------------------
// Check that parameters are valid
if(n < 1) {
WARNING("invalid parameter (n<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(d < 1) {
WARNING("invalid parameter (d<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(k < 1) {
WARNING("invalid parameter (k<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(tol < 0) {
WARNING("invalid parameter (tol<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(maxiter < 0) {
WARNING("invalid parameter (maxiter<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
// Trivial cases
if(k == 1) {
CHECK_CUDA(cudaMemsetAsync(codes, 0, n*sizeof(IndexType_)));
CHECK_CUDA(cudaMemcpyAsync(clusterSizes, &n, sizeof(IndexType_),
cudaMemcpyHostToDevice));
if(updateCentroids(n, d, k, obs, codes,
clusterSizes, centroids,
work, work_int))
WARNING("could not compute k-means centroids");
dim3 blockDim, gridDim;
blockDim.x = WARP_SIZE;
blockDim.y = 1;
blockDim.z = BLOCK_SIZE/WARP_SIZE;
gridDim.x = min((d+WARP_SIZE-1)/WARP_SIZE, 65535);
gridDim.y = 1;
gridDim.z = min((n+BLOCK_SIZE/WARP_SIZE-1)/(BLOCK_SIZE/WARP_SIZE), 65535);
CHECK_CUDA(cudaMemsetAsync(work, 0, n*k*sizeof(ValueType_)));
computeDistances <<< gridDim, blockDim >>> (n, d, 1,
obs,
centroids,
work);
cudaCheckError();
*residual_host = thrust::reduce(thrust::device_pointer_cast(work),
thrust::device_pointer_cast(work+n));
cudaCheckError();
return NVGRAPH_OK;
}
if(n <= k) {
thrust::sequence(thrust::device_pointer_cast(codes),
thrust::device_pointer_cast(codes+n));
cudaCheckError();
thrust::fill_n(thrust::device_pointer_cast(clusterSizes), n, 1);
cudaCheckError();
if(n < k)
CHECK_CUDA(cudaMemsetAsync(clusterSizes+n, 0, (k-n)*sizeof(IndexType_)));
CHECK_CUDA(cudaMemcpyAsync(centroids, obs, d*n*sizeof(ValueType_),
cudaMemcpyDeviceToDevice));
*residual_host = 0;
return NVGRAPH_OK;
}
// Initialize cuBLAS
Cublas::set_pointer_mode_host();
// -------------------------------------------------------
// k-means++ algorithm
// -------------------------------------------------------
// Choose initial cluster centroids
if(initializeCentroids(n, d, k, obs, centroids, codes,
clusterSizes, work))
WARNING("could not initialize k-means centroids");
// Apply k-means iteration until convergence
for(iter=0; iter<maxiter; ++iter) {
// Update cluster centroids
if(updateCentroids(n, d, k, obs, codes,
clusterSizes, centroids,
work, work_int)) WARNING("could not update k-means centroids");
// Determine centroid closest to each observation
residualPrev = *residual_host;
if(assignCentroids(n, d, k, obs, centroids, work,
codes, clusterSizes, residual_host))
WARNING("could not assign observation vectors to k-means clusters");
// Reinitialize empty clusters with new centroids
IndexType_ emptyCentroid = (thrust::find(thrust::device_pointer_cast(clusterSizes),
thrust::device_pointer_cast(clusterSizes+k), 0) - thrust::device_pointer_cast(clusterSizes));
while(emptyCentroid < k) {
if(chooseNewCentroid(n, d, k, uniformDist(rng), obs, work, centroids+IDX(0,emptyCentroid,d)))
WARNING("could not replace empty centroid");
if(assignCentroids(n, d, k, obs, centroids, work, codes, clusterSizes, residual_host))
WARNING("could not assign observation vectors to k-means clusters");
emptyCentroid = (thrust::find(thrust::device_pointer_cast(clusterSizes),
thrust::device_pointer_cast(clusterSizes+k), 0) - thrust::device_pointer_cast(clusterSizes));
cudaCheckError();
}
// Check for convergence
if(fabs(residualPrev-(*residual_host))/n < tol) {
++iter;
break;
}
}
// Warning if k-means has failed to converge
if(fabs(residualPrev-(*residual_host))/n >= tol)
WARNING("k-means failed to converge");
*iters_host = iter;
return NVGRAPH_OK;
}
/// Find clusters with k-means algorithm
/** Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
*
* CNMEM must be initialized before calling this function.
*
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param residual On exit, residual sum of squares (sum of squares
* of distances between observation vectors and centroids).
 * @param iters On exit, number of k-means iterations.
* @return NVGRAPH error flag
*/
template <typename IndexType_, typename ValueType_>
NVGRAPH_ERROR kmeans(IndexType_ n, IndexType_ d, IndexType_ k,
ValueType_ tol, IndexType_ maxiter,
const ValueType_ * __restrict__ obs,
IndexType_ * __restrict__ codes,
ValueType_ & residual,
IndexType_ & iters) {
// Check that parameters are valid
if(n < 1) {
WARNING("invalid parameter (n<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(d < 1) {
WARNING("invalid parameter (d<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(k < 1) {
WARNING("invalid parameter (k<1)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(tol < 0) {
WARNING("invalid parameter (tol<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
if(maxiter < 0) {
WARNING("invalid parameter (maxiter<0)");
return NVGRAPH_ERR_BAD_PARAMETERS;
}
// Allocate memory
// TODO: handle non-zero CUDA streams
cudaStream_t stream = 0;
Vector<IndexType_> clusterSizes(k, stream);
Vector<ValueType_> centroids(d*k, stream);
Vector<ValueType_> work(n*max(k,d), stream);
Vector<IndexType_> work_int(2*d*n, stream);
// Perform k-means
return kmeans<IndexType_,ValueType_>(n, d, k, tol, maxiter,
obs, codes,
clusterSizes.raw(),
centroids.raw(),
work.raw(), work_int.raw(),
&residual, &iters);
}
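// Usage sketch (added illustration, not part of nvgraph): assuming d_obs is a
// device buffer of d*n column-major observations and d_codes a device buffer of
// n indices, the simplified overload above could be driven as follows. The
// tolerance and iteration cap are arbitrary example values.
//
//   float residual = 0;
//   int iters = 0;
//   NVGRAPH_ERROR status = kmeans<int, float>(n, d, k,
//                                             1e-4f, 100,
//                                             d_obs, d_codes,
//                                             residual, iters);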
// =========================================================
// Explicit instantiations
// =========================================================
template
NVGRAPH_ERROR kmeans<int, float>(int n, int d, int k,
float tol, int maxiter,
const float * __restrict__ obs,
int * __restrict__ codes,
float & residual,
int & iters);
template
NVGRAPH_ERROR kmeans<int, double>(int n, int d, int k,
double tol, int maxiter,
const double * __restrict__ obs,
int * __restrict__ codes,
double & residual,
int & iters);
}
//#endif //NVGRAPH_PARTITION
//#endif //debug
|
724a64e547604330d725553e210ad2993bc78601.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mLibCuda.h"
#define THREADS_PER_BLOCK 128
__global__ void getSiftTransformCU_Kernel(unsigned int curFrameIndex,
const float4x4* d_completeTrajectory, unsigned int lastValidCompleteTransform,
float4x4* d_siftTrajectory, unsigned int curFrameIndexAll,
const int* d_currNumFilteredMatchesPerImagePair,
const float4x4* d_filteredTransformsInv, float4x4* d_currIntegrateTrans)
{
for (int i = (int)curFrameIndex - 1; i >= 0; i--) {
////debugging
//printf("[frame %d | %d] to frame %d: #match %d\n", curFrameIndexAll, curFrameIndex, i, d_currNumFilteredMatchesPerImagePair[i]);
////debugging
if (d_currNumFilteredMatchesPerImagePair[i] > 0) {
float4x4 transform;
const unsigned int idxPrevSiftKnown = curFrameIndexAll - (curFrameIndex - i);
d_siftTrajectory[curFrameIndexAll] = d_siftTrajectory[idxPrevSiftKnown] * d_filteredTransformsInv[i];
////debugging
//printf("\tidxPrevSiftKnown = %d\n", idxPrevSiftKnown);
//printf("d_filteredTransformsInv[%d]\n", i);
//d_filteredTransformsInv[i].print();
//printf("d_siftTrajectory[%d]\n", idxPrevSiftKnown);
//d_siftTrajectory[idxPrevSiftKnown].print();
//printf("d_siftTrajectory[%d]\n", curFrameIndexAll);
//d_siftTrajectory[curFrameIndexAll].print();
////debugging
if (lastValidCompleteTransform == 0) {
transform = d_siftTrajectory[curFrameIndexAll];
}
else if (idxPrevSiftKnown < lastValidCompleteTransform) {
transform = d_completeTrajectory[idxPrevSiftKnown] * d_filteredTransformsInv[i];
}
else {
const float4x4 offset = d_siftTrajectory[lastValidCompleteTransform].getInverse() * d_siftTrajectory[idxPrevSiftKnown];
transform = d_completeTrajectory[lastValidCompleteTransform] * offset * d_filteredTransformsInv[i];
}
d_currIntegrateTrans[0] = transform;
////debugging
//printf("transform\n");
//transform.print();
////debugging
break;
}
}
}
extern "C" void computeSiftTransformCU(const float4x4* d_currFilteredTransformsInv, const int* d_currNumFilteredMatchesPerImagePair,
const float4x4* d_completeTrajectory, unsigned int lastValidCompleteTransform,
float4x4* d_siftTrajectory, unsigned int curFrameIndexAll, unsigned int curFrameIndex, float4x4* d_currIntegrateTrans)
{
if (curFrameIndex == 0) return;
getSiftTransformCU_Kernel << <1, 1 >> >(curFrameIndex,
d_completeTrajectory, lastValidCompleteTransform,
d_siftTrajectory, curFrameIndexAll,
d_currNumFilteredMatchesPerImagePair, d_currFilteredTransformsInv,
d_currIntegrateTrans);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
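// Note (added comment): the <<<1, 1>>> launch above matches the kernel's
// structure -- it walks backwards from curFrameIndex to the most recent frame
// with filtered matches and chains transforms off it, a short serial backward
// scan, so a single thread suffices and no synchronization is needed.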
__global__ void updateTrajectoryCU_Kernel(const float4x4* d_globalTrajectory, unsigned int numGlobalTransforms,
float4x4* d_completeTrajectory, unsigned int numCompleteTransforms,
const float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory,
int* d_imageInvalidateList)
{
const unsigned int idxComplete = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int submapSize = numLocalTransformsPerTrajectory - 1;
if (idxComplete < numCompleteTransforms) {
const unsigned int idxGlobal = idxComplete / submapSize;
const unsigned int idxLocal = idxComplete % submapSize;
if (d_imageInvalidateList[idxComplete] == 0) {
d_completeTrajectory[idxComplete].setValue(MINF);
}
else {
d_completeTrajectory[idxComplete] = d_globalTrajectory[idxGlobal] * d_localTrajectories[idxGlobal * numLocalTransformsPerTrajectory + idxLocal];
}
}
}
extern "C" void updateTrajectoryCU(
const float4x4* d_globalTrajectory, unsigned int numGlobalTransforms, float4x4* d_completeTrajectory, unsigned int numCompleteTransforms,
const float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory, unsigned int numLocalTrajectories,
int* d_imageInvalidateList)
{
const unsigned int N = numCompleteTransforms;
hipLaunchKernelGGL(( updateTrajectoryCU_Kernel) , dim3((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0,
d_globalTrajectory, numGlobalTransforms,
d_completeTrajectory, numCompleteTransforms,
d_localTrajectories, numLocalTransformsPerTrajectory,
d_imageInvalidateList);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void initNextGlobalTransformCU_Kernel(float4x4* d_globalTrajectory, unsigned int numGlobalTransforms, unsigned int initGlobalIdx,
float4x4* d_localTrajectories, unsigned int lastValidLocal, unsigned int numLocalTransformsPerTrajectory)
{
//if (d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - 1].m11 == MINF) {
// printf("[ERROR initNextGlobalTransformCU_Kernel]: d_localTrajectories[%d*%d-1] INVALID!\n", numGlobalTransforms, numLocalTransformsPerTrajectory);//debugging
//}
//d_globalTrajectory[numGlobalTransforms] = d_globalTrajectory[initGlobalIdx] * d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - 1];
if (d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - (numLocalTransformsPerTrajectory - lastValidLocal)].m11 == MINF) {
printf("[ERROR initNextGlobalTransformCU_Kernel]: d_localTrajectories[%d*%d-1] INVALID!\n", numGlobalTransforms, numLocalTransformsPerTrajectory);//debugging
}
d_globalTrajectory[numGlobalTransforms] = d_globalTrajectory[initGlobalIdx] * d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - (numLocalTransformsPerTrajectory - lastValidLocal)];
}
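// Note (added comment): the index expression above simplifies to
// (numGlobalTransforms-1)*numLocalTransformsPerTrajectory + lastValidLocal,
// i.e. the last valid local transform of the most recent local trajectory,
// which is composed with the global transform at initGlobalIdx to seed the
// next global pose.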
extern "C" void initNextGlobalTransformCU(
float4x4* d_globalTrajectory, unsigned int numGlobalTransforms, unsigned int initGlobalIdx,
float4x4* d_localTrajectories, unsigned int lastValidLocal, unsigned int numLocalTransformsPerTrajectory)
{
hipLaunchKernelGGL(( initNextGlobalTransformCU_Kernel) , dim3(1), dim3(1) , 0, 0,
d_globalTrajectory, numGlobalTransforms, initGlobalIdx,
d_localTrajectories, lastValidLocal, numLocalTransformsPerTrajectory);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////update with local opt result
//__global__ void refineNextGlobalTransformCU_Kernel(float4x4* d_globalTrajectory, unsigned int numGlobalTransforms,
// float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory, unsigned int lastValidLocal)
//{
// //d_globalTrajectory[numGlobalTransforms] already init from above
// d_globalTrajectory[numGlobalTransforms] = d_globalTrajectory[numGlobalTransforms] * d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - 1];
//}
//
//extern "C" void refineNextGlobalTransformCU(
// float4x4* d_globalTrajectory, unsigned int numGlobalTransforms,
// unsigned int initGlobalIdx,
// float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory)
//{
//hipLaunchKernelGGL(( initNextGlobalTransformCU_Kernel) , dim3(1), dim3(1) , 0, 0,
// d_globalTrajectory, numGlobalTransforms, initGlobalIdx,
// d_localTrajectories, numLocalTransformsPerTrajectory);
//
//#ifdef _DEBUG
// cutilSafeCall(hipDeviceSynchronize());
// cutilCheckMsg(__FUNCTION__);
//#endif
//}
| 724a64e547604330d725553e210ad2993bc78601.cu |
#include "mLibCuda.h"
#define THREADS_PER_BLOCK 128
__global__ void getSiftTransformCU_Kernel(unsigned int curFrameIndex,
const float4x4* d_completeTrajectory, unsigned int lastValidCompleteTransform,
float4x4* d_siftTrajectory, unsigned int curFrameIndexAll,
const int* d_currNumFilteredMatchesPerImagePair,
const float4x4* d_filteredTransformsInv, float4x4* d_currIntegrateTrans)
{
for (int i = (int)curFrameIndex - 1; i >= 0; i--) {
////debugging
//printf("[frame %d | %d] to frame %d: #match %d\n", curFrameIndexAll, curFrameIndex, i, d_currNumFilteredMatchesPerImagePair[i]);
////debugging
if (d_currNumFilteredMatchesPerImagePair[i] > 0) {
float4x4 transform;
const unsigned int idxPrevSiftKnown = curFrameIndexAll - (curFrameIndex - i);
d_siftTrajectory[curFrameIndexAll] = d_siftTrajectory[idxPrevSiftKnown] * d_filteredTransformsInv[i];
////debugging
//printf("\tidxPrevSiftKnown = %d\n", idxPrevSiftKnown);
//printf("d_filteredTransformsInv[%d]\n", i);
//d_filteredTransformsInv[i].print();
//printf("d_siftTrajectory[%d]\n", idxPrevSiftKnown);
//d_siftTrajectory[idxPrevSiftKnown].print();
//printf("d_siftTrajectory[%d]\n", curFrameIndexAll);
//d_siftTrajectory[curFrameIndexAll].print();
////debugging
if (lastValidCompleteTransform == 0) {
transform = d_siftTrajectory[curFrameIndexAll];
}
else if (idxPrevSiftKnown < lastValidCompleteTransform) {
transform = d_completeTrajectory[idxPrevSiftKnown] * d_filteredTransformsInv[i];
}
else {
const float4x4 offset = d_siftTrajectory[lastValidCompleteTransform].getInverse() * d_siftTrajectory[idxPrevSiftKnown];
transform = d_completeTrajectory[lastValidCompleteTransform] * offset * d_filteredTransformsInv[i];
}
d_currIntegrateTrans[0] = transform;
////debugging
//printf("transform\n");
//transform.print();
////debugging
break;
}
}
}
extern "C" void computeSiftTransformCU(const float4x4* d_currFilteredTransformsInv, const int* d_currNumFilteredMatchesPerImagePair,
const float4x4* d_completeTrajectory, unsigned int lastValidCompleteTransform,
float4x4* d_siftTrajectory, unsigned int curFrameIndexAll, unsigned int curFrameIndex, float4x4* d_currIntegrateTrans)
{
if (curFrameIndex == 0) return;
getSiftTransformCU_Kernel << <1, 1 >> >(curFrameIndex,
d_completeTrajectory, lastValidCompleteTransform,
d_siftTrajectory, curFrameIndexAll,
d_currNumFilteredMatchesPerImagePair, d_currFilteredTransformsInv,
d_currIntegrateTrans);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void updateTrajectoryCU_Kernel(const float4x4* d_globalTrajectory, unsigned int numGlobalTransforms,
float4x4* d_completeTrajectory, unsigned int numCompleteTransforms,
const float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory,
int* d_imageInvalidateList)
{
const unsigned int idxComplete = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int submapSize = numLocalTransformsPerTrajectory - 1;
if (idxComplete < numCompleteTransforms) {
const unsigned int idxGlobal = idxComplete / submapSize;
const unsigned int idxLocal = idxComplete % submapSize;
if (d_imageInvalidateList[idxComplete] == 0) {
d_completeTrajectory[idxComplete].setValue(MINF);
}
else {
d_completeTrajectory[idxComplete] = d_globalTrajectory[idxGlobal] * d_localTrajectories[idxGlobal * numLocalTransformsPerTrajectory + idxLocal];
}
}
}
extern "C" void updateTrajectoryCU(
const float4x4* d_globalTrajectory, unsigned int numGlobalTransforms, float4x4* d_completeTrajectory, unsigned int numCompleteTransforms,
const float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory, unsigned int numLocalTrajectories,
int* d_imageInvalidateList)
{
const unsigned int N = numCompleteTransforms;
updateTrajectoryCU_Kernel <<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(
d_globalTrajectory, numGlobalTransforms,
d_completeTrajectory, numCompleteTransforms,
d_localTrajectories, numLocalTransformsPerTrajectory,
d_imageInvalidateList);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void initNextGlobalTransformCU_Kernel(float4x4* d_globalTrajectory, unsigned int numGlobalTransforms, unsigned int initGlobalIdx,
float4x4* d_localTrajectories, unsigned int lastValidLocal, unsigned int numLocalTransformsPerTrajectory)
{
//if (d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - 1].m11 == MINF) {
// printf("[ERROR initNextGlobalTransformCU_Kernel]: d_localTrajectories[%d*%d-1] INVALID!\n", numGlobalTransforms, numLocalTransformsPerTrajectory);//debugging
//}
//d_globalTrajectory[numGlobalTransforms] = d_globalTrajectory[initGlobalIdx] * d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - 1];
if (d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - (numLocalTransformsPerTrajectory - lastValidLocal)].m11 == MINF) {
printf("[ERROR initNextGlobalTransformCU_Kernel]: d_localTrajectories[%d*%d-1] INVALID!\n", numGlobalTransforms, numLocalTransformsPerTrajectory);//debugging
}
d_globalTrajectory[numGlobalTransforms] = d_globalTrajectory[initGlobalIdx] * d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - (numLocalTransformsPerTrajectory - lastValidLocal)];
}
extern "C" void initNextGlobalTransformCU(
float4x4* d_globalTrajectory, unsigned int numGlobalTransforms, unsigned int initGlobalIdx,
float4x4* d_localTrajectories, unsigned int lastValidLocal, unsigned int numLocalTransformsPerTrajectory)
{
initNextGlobalTransformCU_Kernel <<< 1, 1 >>>(
d_globalTrajectory, numGlobalTransforms, initGlobalIdx,
d_localTrajectories, lastValidLocal, numLocalTransformsPerTrajectory);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////update with local opt result
//__global__ void refineNextGlobalTransformCU_Kernel(float4x4* d_globalTrajectory, unsigned int numGlobalTransforms,
// float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory, unsigned int lastValidLocal)
//{
// //d_globalTrajectory[numGlobalTransforms] already init from above
// d_globalTrajectory[numGlobalTransforms] = d_globalTrajectory[numGlobalTransforms] * d_localTrajectories[numGlobalTransforms*numLocalTransformsPerTrajectory - 1];
//}
//
//extern "C" void refineNextGlobalTransformCU(
// float4x4* d_globalTrajectory, unsigned int numGlobalTransforms,
// unsigned int initGlobalIdx,
// float4x4* d_localTrajectories, unsigned int numLocalTransformsPerTrajectory)
//{
// initNextGlobalTransformCU_Kernel <<< 1, 1 >>>(
// d_globalTrajectory, numGlobalTransforms, initGlobalIdx,
// d_localTrajectories, numLocalTransformsPerTrajectory);
//
//#ifdef _DEBUG
// cutilSafeCall(cudaDeviceSynchronize());
// cutilCheckMsg(__FUNCTION__);
//#endif
//}
|
2cd6f40ffa2cded05d4925d0898b79513a7c6e0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_reference.h>
#include <thrust/tuple.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__host__ __device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
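// Note (added comment): hit() treats the view ray through (ox, oy) as
// orthographic along the z axis. dz = sqrtf(radius*radius - dx*dx - dy*dy) is
// the half-chord depth, so the returned dz + z is the z coordinate of the
// sphere surface facing the viewer, and *n = dz/radius is a [0,1] shading
// factor that peaks at the silhouette center.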
struct CalculateBitmap
{
Sphere* s;
CalculateBitmap(Sphere* sp) : s(sp) {}
__host__ __device__ thrust::tuple<unsigned char,unsigned char,unsigned char, unsigned char> operator()(const int& idx)const {
int x = idx / DIM;
int y = idx % DIM;
//int offset = idx;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
//printf("x:%d, y:%d, ox:%f, oy:%f\n",x,y,ox,oy);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
/*
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
*/
thrust::tuple<unsigned char, unsigned char, unsigned char, unsigned char> result((int)(r*255),(int)(g*255),(int)(b*255),255);
return result;
}
};
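// Note (added, hedged suggestion): CalculateBitmap is a plain device functor, so
// main() below drives it with thrust::transform over an explicit index vector.
// A thrust::counting_iterator could express the same thing without materializing
// DIM*DIM indices, e.g. (sketch, assuming <thrust/iterator/counting_iterator.h>):
//
//   thrust::transform(thrust::counting_iterator<int>(0),
//                     thrust::counting_iterator<int>(DIM*DIM),
//                     dev_bitm.begin(), CalculateBitmap(dev_temp_s));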
void ppm_write(unsigned char* bitmap, int xdim,int ydim, FILE* fp)
{
int i,x,y;
fprintf(fp,"P3\n");
fprintf(fp,"%d %d\n",xdim, ydim);
fprintf(fp,"255\n");
for (y=0;y<ydim;y++) {
for (x=0;x<xdim;x++) {
i=x+y*xdim;
fprintf(fp,"%d %d %d ",bitmap[4*i],bitmap[4*i+1],bitmap[4*i+2]);
}
fprintf(fp,"\n");
}
}
int main(int argc, char* argv[])
{
srand(time(NULL));
if (argc!=2) {
printf("> a.out [filename.ppm]\n");
printf("for example, '> a.out result.ppm' means executing THRUST\n");
exit(0);
}
FILE* fp = fopen(argv[1],"w");
Sphere* temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
Sphere* dev_temp_s;
hipMalloc( (void**)&dev_temp_s, SPHERES*sizeof(Sphere));
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 2000.0f ) - 1000;
temp_s[i].y = rnd( 2000.0f ) - 1000;
temp_s[i].z = rnd( 2000.0f ) - 1000;
temp_s[i].radius = rnd( 200.0f ) + 40;
}
hipMemcpy(dev_temp_s, temp_s, SPHERES*sizeof(Sphere),hipMemcpyHostToDevice);
thrust::device_vector<thrust::tuple<unsigned char, unsigned char, unsigned char, unsigned char> > dev_bitm(DIM*DIM);
thrust::device_vector<int> idx(DIM*DIM);
thrust::sequence(idx.begin(),idx.end());
unsigned char* bitmap = (unsigned char*) malloc(sizeof(unsigned char)*DIM*DIM*4);
unsigned char* dev_bitmap;
hipMalloc((void**)&dev_bitmap,sizeof(unsigned char)*DIM*DIM*4);
clock_t start = clock();
thrust::transform(idx.begin(),idx.end(),dev_bitm.begin(),CalculateBitmap(dev_temp_s));
clock_t end = clock();
//printf("end of parallel\n");
thrust::host_vector<thrust::tuple<unsigned char,unsigned char,unsigned char, unsigned char> > bitm = dev_bitm;
for(int i=0;i<DIM;i++){
for(int j=0;j<DIM;j++){
bitmap[(i*DIM+j)*4 + 0] = thrust::get<0>(bitm[i+j*DIM]);
bitmap[(i*DIM+j)*4 + 1] = thrust::get<1>(bitm[i+j*DIM]);
bitmap[(i*DIM+j)*4 + 2] = thrust::get<2>(bitm[i+j*DIM]);
bitmap[(i*DIM+j)*4 + 3] = thrust::get<3>(bitm[i+j*DIM]);
}
}
//clock_t end = clock();
//printf("end of copy\n");
ppm_write(bitmap,DIM,DIM,fp);
fclose(fp);
//free(bitmap);
//free(temp_s);
printf("THRUST ray tracing: %1.6f sec\n",(end-start) / (float)CLOCKS_PER_SEC);
printf("[%s] was generated.\n",argv[1]);
return 0;
}
| 2cd6f40ffa2cded05d4925d0898b79513a7c6e0d.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_reference.h>
#include <thrust/tuple.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__host__ __device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
struct CalculateBitmap
{
Sphere* s;
CalculateBitmap(Sphere* sp) : s(sp) {}
__host__ __device__ thrust::tuple<unsigned char,unsigned char,unsigned char, unsigned char> operator()(const int& idx)const {
int x = idx / DIM;
int y = idx % DIM;
//int offset = idx;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
//printf("x:%d, y:%d, ox:%f, oy:%f\n",x,y,ox,oy);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
/*
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
*/
thrust::tuple<unsigned char, unsigned char, unsigned char, unsigned char> result((int)(r*255),(int)(g*255),(int)(b*255),255);
return result;
}
};
void ppm_write(unsigned char* bitmap, int xdim,int ydim, FILE* fp)
{
int i,x,y;
fprintf(fp,"P3\n");
fprintf(fp,"%d %d\n",xdim, ydim);
fprintf(fp,"255\n");
for (y=0;y<ydim;y++) {
for (x=0;x<xdim;x++) {
i=x+y*xdim;
fprintf(fp,"%d %d %d ",bitmap[4*i],bitmap[4*i+1],bitmap[4*i+2]);
}
fprintf(fp,"\n");
}
}
int main(int argc, char* argv[])
{
srand(time(NULL));
if (argc!=2) {
printf("> a.out [filename.ppm]\n");
printf("for example, '> a.out result.ppm' means executing THRUST\n");
exit(0);
}
FILE* fp = fopen(argv[1],"w");
Sphere* temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
Sphere* dev_temp_s;
cudaMalloc( (void**)&dev_temp_s, SPHERES*sizeof(Sphere));
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 2000.0f ) - 1000;
temp_s[i].y = rnd( 2000.0f ) - 1000;
temp_s[i].z = rnd( 2000.0f ) - 1000;
temp_s[i].radius = rnd( 200.0f ) + 40;
}
cudaMemcpy(dev_temp_s, temp_s, SPHERES*sizeof(Sphere),cudaMemcpyHostToDevice);
thrust::device_vector<thrust::tuple<unsigned char, unsigned char, unsigned char, unsigned char> > dev_bitm(DIM*DIM);
thrust::device_vector<int> idx(DIM*DIM);
thrust::sequence(idx.begin(),idx.end());
unsigned char* bitmap = (unsigned char*) malloc(sizeof(unsigned char)*DIM*DIM*4);
unsigned char* dev_bitmap;
cudaMalloc((void**)&dev_bitmap,sizeof(unsigned char)*DIM*DIM*4);
clock_t start = clock();
thrust::transform(idx.begin(),idx.end(),dev_bitm.begin(),CalculateBitmap(dev_temp_s));
clock_t end = clock();
//printf("end of parallel\n");
thrust::host_vector<thrust::tuple<unsigned char,unsigned char,unsigned char, unsigned char> > bitm = dev_bitm;
for(int i=0;i<DIM;i++){
for(int j=0;j<DIM;j++){
bitmap[(i*DIM+j)*4 + 0] = thrust::get<0>(bitm[i+j*DIM]);
bitmap[(i*DIM+j)*4 + 1] = thrust::get<1>(bitm[i+j*DIM]);
bitmap[(i*DIM+j)*4 + 2] = thrust::get<2>(bitm[i+j*DIM]);
bitmap[(i*DIM+j)*4 + 3] = thrust::get<3>(bitm[i+j*DIM]);
}
}
//clock_t end = clock();
//printf("end of copy\n");
ppm_write(bitmap,DIM,DIM,fp);
fclose(fp);
//free(bitmap);
//free(temp_s);
printf("THRUST ray tracing: %1.6f sec\n",(end-start) / (float)CLOCKS_PER_SEC);
printf("[%s] was generated.\n",argv[1]);
return 0;
}
|
6e09d42ab2ee9452ef0d4815ee37d924bc2de860.hip | // !!! This is a file automatically generated by hipify!!!
#define _USE_MATH_DEFINES
#include <cmath>
#include "sampling/sampler.hpp"
namespace rt {
void CSampler::init() {
hiprand_init(0, 0, 0, &m_curandState);
}
void CSampler::init(uint64_t seed, uint64_t sequence) {
hiprand_init(seed, sequence, 0, &m_curandState);
}
float CSampler::uniformSample01() {
return hiprand_uniform(&m_curandState);
}
glm::vec3 CSampler::uniformSampleHemisphere() {
float rand1 = hiprand_uniform(&m_curandState);
float rand2 = hiprand_uniform(&m_curandState);
float r = glm::sqrt(glm::max(0.0f, 1.0f - rand1 * rand1));
float phi = 2.0 * M_PI * rand2;
return glm::vec3(r * glm::cos(phi), r * glm::sin(phi), rand1);
}
float CSampler::uniformHemispherePdf() const {
return 1.0f / (2 * M_PI);
}
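// Usage sketch (added illustration, not part of the original source): the
// sample/pdf pair above supports a standard Monte Carlo estimator. 'integrand'
// is a hypothetical device function of direction; CSampler's methods are
// assumed to be device-callable, as their use of curand/hiprand suggests.
//
//   __device__ float estimateHemisphereIntegral(CSampler& sampler, int nSamples) {
//     float sum = 0.0f;
//     for (int i = 0; i < nSamples; ++i) {
//       glm::vec3 w = sampler.uniformSampleHemisphere();
//       sum += integrand(w) / sampler.uniformHemispherePdf();  // f(w) / p(w)
//     }
//     return sum / nSamples;  // unbiased estimate of the hemisphere integral
//   }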
}
| 6e09d42ab2ee9452ef0d4815ee37d924bc2de860.cu | #define _USE_MATH_DEFINES
#include <cmath>
#include "sampling/sampler.hpp"
namespace rt {
void CSampler::init() {
curand_init(0, 0, 0, &m_curandState);
}
void CSampler::init(uint64_t seed, uint64_t sequence) {
curand_init(seed, sequence, 0, &m_curandState);
}
float CSampler::uniformSample01() {
return curand_uniform(&m_curandState);
}
glm::vec3 CSampler::uniformSampleHemisphere() {
float rand1 = curand_uniform(&m_curandState);
float rand2 = curand_uniform(&m_curandState);
float r = glm::sqrt(glm::max(0.0f, 1.0f - rand1 * rand1));
float phi = 2.0 * M_PI * rand2;
return glm::vec3(r * glm::cos(phi), r * glm::sin(phi), rand1);
}
float CSampler::uniformHemispherePdf() const {
return 1.0f / (2 * M_PI);
}
}
|