hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
6751d751e461f51f426855786c7ef3acb3febce3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
// For the underlying principle, see: https://blog.csdn.net/piaoxuezhong/article/details/78302920
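// The weight below is the product of a spatial Gaussian and a depth (range) Gaussian, i.e. exp(-(dx*dx+dy*dy)/(2*sigma_spatial^2)) * exp(-(d-d0)^2/(2*sigma_depth^2)), folded into a single __expf call.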
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); // squared spatial distance between pixels
float color2 = (value - depth) * (value - depth); // squared difference between depth values
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half)); // exponential weight
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth) //kernel_size:7 sigma_depth:0.04 sigma_spatial:4.5
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel, hipFuncCachePreferL1) ); // configure the kernel's cache preference
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
/// Note: this function is not called anywhere in the pipeline.
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( truncate_depth_kernel), dim3(grid), dim3(block), 0, 0, depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( hipGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
int center = src(2 * y, 2 * x); // depth value of the corresponding pixel in the source (finer) level
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int cx = max (0, 2 * x - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (; cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y)); // divUp is defined in safe_call.hpp
hipLaunchKernelGGL(( pyramid_kernel), dim3(grid), dim3(block), 0, 0, source, pyramid, sigma_depth * 3);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( compute_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, reproj, normals);
cudaSafeCall ( hipGetLastError () );
hipLaunchKernelGGL(( mask_depth_kernel), dim3(grid), dim3(block), 0, 0, normals, depth);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute points and normals (computePointNormals)
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN (); // sentinel value, nothing special about it
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
// compute three 3D points in space
float3 v00 = reproj(x, y, z00); // 3D coordinates in the camera frame, in meters
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f); // why the negative sign?
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( points_normals_kernel), dim3(grid), dim3(block), 0, 0, reproj, depth, points, normals);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c) //finv: 1/fx,1/fy
{
int x = threadIdx.x + blockIdx.x * blockDim.x; // threadIdx is a CUDA built-in
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x; // X on the normalized image plane (camera-centered frame), in meters
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
// this computes a range (distance along the ray), not the Z coordinate
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); // meters; __float2half_rn converts single precision to half precision
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8); // 2D thread block
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); //divUp: (depth.cols () + block.x - 1) / block.x;
hipLaunchKernelGGL(( compute_dists_kernel), dim3(grid), dim3(block), 0, 0, depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( hipGetLastError () );
}
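// A minimal host-side sketch of the grid-rounding helper described in the comment above; the
// real divUp lives in safe_call.hpp, and divUp_sketch is a hypothetical name used here only
// for illustration: it rounds the grid size up so every pixel gets a thread.
static inline int divUp_sketch (int total, int grain) { return (total + grain - 1) / grain; }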
namespace kfusion
{
namespace device
{
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
hipLaunchKernelGGL(( resize_depth_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, normals, depth_out, normals_out);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
hipLaunchKernelGGL(( resize_points_normals_kernel), dim3(grid), dim3(block), 0, 0, points, normals, points_out, normals_out);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
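// L: unit vector toward the light, V: unit vector toward the camera (at the origin), R: reflection of L about the normal N; the attenuation term Att is taken as 1 here.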
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( hipGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
hipLaunchKernelGGL(( tangent_colors_kernel), dim3(grid), dim3(block), 0, 0, normals, image);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
hipLaunchKernelGGL(( mergePointNormalKernel), dim3(divUp (total, block)), dim3(block), 0, 0, cloud, normals, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 6751d751e461f51f426855786c7ef3acb3febce3.cu | #include "device.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
// For the underlying principle, see: https://blog.csdn.net/piaoxuezhong/article/details/78302920
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); // squared spatial distance between pixels
float color2 = (value - depth) * (value - depth); // squared difference between depth values
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half)); // exponential weight
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth) //kernel_size:7 sigma_depth:0.04 sigma_spatial:4.5
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel, cudaFuncCachePreferL1) ); // configure the kernel's cache preference
bilateral_kernel<<<grid, block>>>(src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
/// Note: this function is not called anywhere in the pipeline.
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
truncate_depth_kernel<<<grid, block>>>(depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( cudaGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
int center = src(2 * y, 2 * x); // depth value of the corresponding pixel in the source (finer) level
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int cx = max (0, 2 * x - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (; cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y)); // divUp is defined in safe_call.hpp
pyramid_kernel<<<grid, block>>>(source, pyramid, sigma_depth * 3);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
compute_normals_kernel<<<grid, block>>>(depth, reproj, normals);
cudaSafeCall ( cudaGetLastError () );
mask_depth_kernel<<<grid, block>>>(normals, depth);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute points and normals (computePointNormals)
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN (); // sentinel value, nothing special about it
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
// compute three 3D points in space
float3 v00 = reproj(x, y, z00); // 3D coordinates in the camera frame, in meters
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f); // why the negative sign?
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
points_normals_kernel<<<grid, block>>>(reproj, depth, points, normals);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c) //finv: 1/fx,1/fy
{
int x = threadIdx.x + blockIdx.x * blockDim.x; // threadIdx is a CUDA built-in
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x; // X on the normalized image plane (camera-centered frame), in meters
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
// this computes a range (distance along the ray), not the Z coordinate
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); // meters; __float2half_rn converts single precision to half precision
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8); // 2D thread block
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); //divUp: (depth.cols () + block.x - 1) / block.x;
compute_dists_kernel<<<grid, block>>>(depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_depth_normals_kernel<<<grid, block>>>(depth, normals, depth_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_points_normals_kernel<<<grid, block>>>(points, normals, points_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
tangent_colors_kernel<<<grid, block>>>(normals, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
mergePointNormalKernel<<<divUp (total, block), block>>>(cloud, normals, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
0544ba5ce279db37e6ab3ec1ab7c8a8e128dc4d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL2_H_
#define _KERNEL2_H_
__global__ void
Kernel2( int* g_graph_mask, int *g_updating_graph_mask, int* g_graph_visited, int *g_over, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_updating_graph_mask[tid])
{
g_graph_mask[tid]=true;
g_graph_visited[tid]=true;
*g_over=true;
g_updating_graph_mask[tid]=false;
}
}
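// A hedged host-side sketch (an assumption based on the HiPC'07 paper cited above, not code
// from this repository) of how Kernel2 is typically driven together with a frontier-expansion
// kernel; bfs_iterate_sketch is a hypothetical helper name, MAX_THREADS_PER_BLOCK is assumed
// to be defined by the including translation unit (Kernel2 already requires it), and the
// expansion kernel (Kernel1 in the paper) is elided.
static inline void bfs_iterate_sketch(int* d_graph_mask, int* d_updating_graph_mask,
int* d_graph_visited, int* d_over, int no_of_nodes)
{
int num_blocks = (no_of_nodes + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK;
int over;
do
{
// clear the device-side stop flag before each sweep
over = 0;
hipMemcpy(d_over, &over, sizeof(int), hipMemcpyHostToDevice);
// ... the frontier-expansion kernel (Kernel1 in the paper) would be launched here ...
// fold the updating mask back into the frontier mask; any touched vertex sets *d_over
hipLaunchKernelGGL(Kernel2, dim3(num_blocks), dim3(MAX_THREADS_PER_BLOCK), 0, 0,
d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
hipMemcpy(&over, d_over, sizeof(int), hipMemcpyDeviceToHost);
} while (over); // keep iterating while the last sweep updated any vertex
}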
#endif
| 0544ba5ce279db37e6ab3ec1ab7c8a8e128dc4d6.cu | /*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL2_H_
#define _KERNEL2_H_
__global__ void
Kernel2( int* g_graph_mask, int *g_updating_graph_mask, int* g_graph_visited, int *g_over, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_updating_graph_mask[tid])
{
g_graph_mask[tid]=true;
g_graph_visited[tid]=true;
*g_over=true;
g_updating_graph_mask[tid]=false;
}
}
#endif
|
c53225506544780ba6af252871b42679829e072d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_test_fixtures.h>
#include <utilities/type_dispatcher.hpp>
#include <cudf/cudf.h>
#include <thrust/device_vector.h>
#include <gtest/gtest.h>
#include <cstdint>
/**
* @file dispatcher_test.cu
* @brief Tests the type_dispatcher
*/
struct DispatcherTest : public GdfTest {
/**---------------------------------------------------------------------------*
* @brief Lists every gdf_dtype that the type_dispatcher supports.
*
* This vector *must* list every gdf_dtype supported by the type_dispatcher.
*
* If a new type gdf_dtype is added, but this list is not updated, then the
* tests will fail.
*
*---------------------------------------------------------------------------**/
std::vector<gdf_dtype> supported_dtypes{
GDF_INT8, GDF_INT16, GDF_INT32, GDF_INT64,
GDF_FLOAT32, GDF_FLOAT64, GDF_DATE32, GDF_DATE64,
GDF_TIMESTAMP, GDF_CATEGORY, GDF_STRING_CATEGORY, GDF_BOOL8};
// These types are not supported by the type_dispatcher
std::vector<gdf_dtype> unsupported_dtypes{GDF_invalid, GDF_STRING};
};
using TestTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, float,
double, cudf::date32, cudf::date64,
cudf::timestamp, cudf::category,
cudf::nvstring_category, cudf::bool8>;
template <typename T>
struct TypedDispatcherTest : DispatcherTest {};
TYPED_TEST_CASE(TypedDispatcherTest, TestTypes);
namespace {
template <typename ExpectedType>
struct type_tester {
template <typename DispatchedType>
bool operator()() {
return std::is_same<ExpectedType, DispatchedType>::value;
}
};
} // namespace
// Ensure that the type_to_gdf_dtype trait maps to the correct gdf_dtype
TYPED_TEST(TypedDispatcherTest, TraitsTest) {
EXPECT_TRUE(cudf::type_dispatcher(cudf::gdf_dtype_of<TypeParam>(),
type_tester<TypeParam>{}));
}
TEST_F(DispatcherTest, NumberOfTypesTest) {
// N_GDF_TYPES indicates how many enums there are in `gdf_dtype`,
// therefore, if a gdf_dtype is added without updating this test, the test
// will fail
const size_t expected_num_supported_dtypes =
N_GDF_TYPES - unsupported_dtypes.size();
// Note: If this test fails, that means a type was added to gdf_dtype
// without adding it to the `supported_dtypes` list in this test fixture
ASSERT_EQ(expected_num_supported_dtypes, supported_dtypes.size())
<< "Number of supported types does not match what was expected.";
}
namespace {
struct test_functor {
template <typename T>
__host__ __device__ bool operator()(gdf_dtype type_id) {
return (type_id == cudf::gdf_dtype_of<T>());
}
};
__global__ void dispatch_test_kernel(gdf_dtype type, bool* d_result) {
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(type, test_functor{}, type);
}
} // namespace
// Every supported gdf_dtype should dispatch the correct type
TEST_F(DispatcherTest, HostDispatchFunctor) {
for (auto const& t : this->supported_dtypes) {
bool result = cudf::type_dispatcher(t, test_functor{}, t);
EXPECT_TRUE(result);
}
}
TEST_F(DispatcherTest, DeviceDispatchFunctor) {
thrust::device_vector<bool> result(1);
for (auto const& t : this->supported_dtypes) {
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, t, result.data().get());
hipDeviceSynchronize();
EXPECT_EQ(true, result[0]);
}
}
// Unsupported gdf_dtypes should throw std::runtime_error in host code
TEST_F(DispatcherTest, UnsuportedTypesTest) {
for (auto const& t : unsupported_dtypes) {
EXPECT_THROW(cudf::type_dispatcher(t, test_functor{}, t),
std::runtime_error);
}
}
using DispatcherDeathTest = DispatcherTest;
// Unsupported gdf_dtypes in device code should set the appropriate error code
// and invalidate the device context
TEST_F(DispatcherDeathTest, DeviceDispatchFunctor) {
testing::FLAGS_gtest_death_test_style = "threadsafe";
thrust::device_vector<bool> result(1);
auto call_kernel = [&result](gdf_dtype t) {
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, t, result.data().get());
auto error_code = hipDeviceSynchronize();
// Kernel should fail with `hipErrorAssert` on an unsupported gdf_dtype
// This error invalidates the current device context, so we need to kill
// the current process. Running with EXPECT_DEATH spawns a new process for
// each attempted kernel launch
EXPECT_EQ(hipErrorAssert, error_code);
exit(-1);
};
for (auto const& t : unsupported_dtypes) {
EXPECT_DEATH(call_kernel(t), "");
}
}
| c53225506544780ba6af252871b42679829e072d.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_test_fixtures.h>
#include <utilities/type_dispatcher.hpp>
#include <cudf/cudf.h>
#include <thrust/device_vector.h>
#include <gtest/gtest.h>
#include <cstdint>
/**
* @file dispatcher_test.cu
* @brief Tests the type_dispatcher
*/
struct DispatcherTest : public GdfTest {
/**---------------------------------------------------------------------------*
* @brief Lists every gdf_dtype that the type_dispatcher supports.
*
* This vector *must* list every gdf_dtype supported by the type_dispatcher.
*
* If a new type gdf_dtype is added, but this list is not updated, then the
* tests will fail.
*
*---------------------------------------------------------------------------**/
std::vector<gdf_dtype> supported_dtypes{
GDF_INT8, GDF_INT16, GDF_INT32, GDF_INT64,
GDF_FLOAT32, GDF_FLOAT64, GDF_DATE32, GDF_DATE64,
GDF_TIMESTAMP, GDF_CATEGORY, GDF_STRING_CATEGORY, GDF_BOOL8};
// These types are not supported by the type_dispatcher
std::vector<gdf_dtype> unsupported_dtypes{GDF_invalid, GDF_STRING};
};
using TestTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, float,
double, cudf::date32, cudf::date64,
cudf::timestamp, cudf::category,
cudf::nvstring_category, cudf::bool8>;
template <typename T>
struct TypedDispatcherTest : DispatcherTest {};
TYPED_TEST_CASE(TypedDispatcherTest, TestTypes);
namespace {
template <typename ExpectedType>
struct type_tester {
template <typename DispatchedType>
bool operator()() {
return std::is_same<ExpectedType, DispatchedType>::value;
}
};
} // namespace
// Ensure that the type_to_gdf_dtype trait maps to the correct gdf_dtype
TYPED_TEST(TypedDispatcherTest, TraitsTest) {
EXPECT_TRUE(cudf::type_dispatcher(cudf::gdf_dtype_of<TypeParam>(),
type_tester<TypeParam>{}));
}
TEST_F(DispatcherTest, NumberOfTypesTest) {
// N_GDF_TYPES indicates how many enums there are in `gdf_dtype`,
// therefore, if a gdf_dtype is added without updating this test, the test
// will fail
const size_t expected_num_supported_dtypes =
N_GDF_TYPES - unsupported_dtypes.size();
// Note: If this test fails, that means a type was added to gdf_dtype
// without adding it to the `supported_dtypes` list in this test fixture
ASSERT_EQ(expected_num_supported_dtypes, supported_dtypes.size())
<< "Number of supported types does not match what was expected.";
}
namespace {
struct test_functor {
template <typename T>
__host__ __device__ bool operator()(gdf_dtype type_id) {
return (type_id == cudf::gdf_dtype_of<T>());
}
};
__global__ void dispatch_test_kernel(gdf_dtype type, bool* d_result) {
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(type, test_functor{}, type);
}
} // namespace
// Every supported gdf_dtype should dispatch the correct type
TEST_F(DispatcherTest, HostDispatchFunctor) {
for (auto const& t : this->supported_dtypes) {
bool result = cudf::type_dispatcher(t, test_functor{}, t);
EXPECT_TRUE(result);
}
}
TEST_F(DispatcherTest, DeviceDispatchFunctor) {
thrust::device_vector<bool> result(1);
for (auto const& t : this->supported_dtypes) {
dispatch_test_kernel<<<1, 1>>>(t, result.data().get());
cudaDeviceSynchronize();
EXPECT_EQ(true, result[0]);
}
}
// Unsupported gdf_dtypes should throw std::runtime_error in host code
TEST_F(DispatcherTest, UnsuportedTypesTest) {
for (auto const& t : unsupported_dtypes) {
EXPECT_THROW(cudf::type_dispatcher(t, test_functor{}, t),
std::runtime_error);
}
}
using DispatcherDeathTest = DispatcherTest;
// Unsupported gdf_dtypes in device code should set the appropriate error code
// and invalidate the device context
TEST_F(DispatcherDeathTest, DeviceDispatchFunctor) {
testing::FLAGS_gtest_death_test_style = "threadsafe";
thrust::device_vector<bool> result(1);
auto call_kernel = [&result](gdf_dtype t) {
dispatch_test_kernel<<<1, 1>>>(t, result.data().get());
auto error_code = cudaDeviceSynchronize();
// Kernel should fail with `cudaErrorAssert` on an unsupported gdf_dtype
// This error invalidates the current device context, so we need to kill
// the current process. Running with EXPECT_DEATH spawns a new process for
// each attempted kernel launch
EXPECT_EQ(cudaErrorAssert, error_code);
exit(-1);
};
for (auto const& t : unsupported_dtypes) {
EXPECT_DEATH(call_kernel(t), "");
}
}
|
01dc0095976b7459ab06073d68954ef59a404258.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File misc.cuh contains helper functions of various kinds.
*/
#include <iostream>
#include <vector>
#include <cstring>
#include <math.h>
#include <limits>
#include <sstream>
#include "misc.cuh"
void allocateMemoryVoid(void** pointer, size_t allocSize) {
#ifdef __NVCC__
cudaSafeCall(hipMallocManaged(pointer, allocSize));
#else
*pointer = malloc(allocSize);
#endif
}
void freeMemoryVoid(void* pointer) {
#ifdef __NVCC__
cudaSafeCall(hipFree(pointer));
#else
free(pointer);
#endif
}
DEV void printArrayF(floating_t* arr, int n) {
printf("[ ");
for(int i = 0; i < n; i++)
printf("%f ", arr[i]);
printf("]\n");
}
DEV void printArrayI(int* arr, int n) {
printf("[ ");
for(int i = 0; i < n; i++)
printf("%d ", arr[i]);
printf("]\n");
}
| 01dc0095976b7459ab06073d68954ef59a404258.cu | /*
* File misc.cuh contains helper functions of various kinds.
*/
#include <iostream>
#include <vector>
#include <cstring>
#include <math.h>
#include <limits>
#include <sstream>
#include "misc.cuh"
void allocateMemoryVoid(void** pointer, size_t allocSize) {
#ifdef __NVCC__
cudaSafeCall(cudaMallocManaged(pointer, allocSize));
#else
*pointer = malloc(allocSize);
#endif
}
void freeMemoryVoid(void* pointer) {
#ifdef __NVCC__
cudaSafeCall(cudaFree(pointer));
#else
free(pointer);
#endif
}
DEV void printArrayF(floating_t* arr, int n) {
printf("[ ");
for(int i = 0; i < n; i++)
printf("%f ", arr[i]);
printf("]\n");
}
DEV void printArrayI(int* arr, int n) {
printf("[ ");
for(int i = 0; i < n; i++)
printf("%d ", arr[i]);
printf("]\n");
}
|
a5e373f4446b13c33e2f5a08a838afb16673e49f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
using paddle::platform::float16;
namespace paddle {
namespace operators {
// CUDA: index helpers
#define idx4_4(index, d1, d2, d3, d4) (index % d4)
#define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3)
#define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2)
#define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) % d1)
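// These macros recover the coordinates of a flattened NCHW-style index, i.e. index = ((n * d2 + c) * d3 + h) * d4 + w, so idx4_1..idx4_4 give n, c, h and w respectively.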
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ bool GT_E(T a, T b) {
return (a > b) || Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool LT_E(T a, T b) {
return (a < b) || Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool GT(T a, T b) {
return (a - b) > 1e-4;
}
template <typename T>
__device__ T max(T a, T b) {
return a > b ? a : b;
}
template <typename T>
__device__ T min(T a, T b) {
return a < b ? a : b;
}
/*
* check if (x, y) is in the boundary of roi
*/
template <typename T>
__device__ bool in_quad(T x, T y, T roi_x[], T roi_y[]) {
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
if (fabs(y - start_h) < 1e-4 && fabs(y - end_h) < 1e-4 &&
GT_E<T>(x, min<T>(start_w, end_w)) &&
LT_E<T>(x, max<T>(start_w, end_w))) {
return true;
}
} else {
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4 && GT_E(y, min<T>(start_h, end_h)) &&
LT_E<T>(y, max<T>(start_h, end_h))) {
return true;
}
}
}
int n_cross = 0;
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
continue;
}
if (LT_E<T>(y, min<T>(start_h, end_h)) ||
GT<T>(y, max<T>(start_h, end_h))) {
continue;
}
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4) {
return true;
}
if (GT<T>(intersec_x, x)) {
n_cross++;
}
}
return (n_cross % 2 == 1);
}
/**
* Perform bilinear interpolation in the input feature map.
*/
template <typename T>
__device__ void bilinear_interpolate(const T* in_data, const int channels,
const int width, const int height,
int in_n, int in_c, T in_w, T in_h,
T* val) {
// Deal with cases where the source coords are outside the feature map boundary
if (GT<T>(-0.5, in_w) || GT<T>(in_w, width - 0.5) || GT<T>(-0.5, in_h) ||
GT<T>(in_h, height - 0.5)) {
val[0] = 0.0;
return;
}
if (GT<T>(0, in_w)) {
in_w = 0;
}
if (GT<T>(0, in_h)) {
in_h = 0;
}
int in_w_floor = floor(in_w);
int in_h_floor = floor(in_h);
int in_w_ceil;
int in_h_ceil;
if (GT_E<T>(in_w_floor, width - 1)) {
in_w_ceil = in_w_floor = width - 1;
in_w = static_cast<T>(in_w_floor);
} else {
in_w_ceil = in_w_floor + 1;
}
if (GT_E<T>(in_h_floor, height - 1)) {
in_h_ceil = in_h_floor = height - 1;
in_h = static_cast<T>(in_h_floor);
} else {
in_h_ceil = in_h_floor + 1;
}
T w_floor = in_w - in_w_floor;
T h_floor = in_h - in_h_floor;
T w_ceil = 1 - w_floor;
T h_ceil = 1 - h_floor;
const T* data = in_data + (in_n * channels + in_c) * height * width;
// Do bilinear interpolation
T v1 = data[in_h_floor * width + in_w_floor];
T v2 = data[in_h_ceil * width + in_w_floor];
T v3 = data[in_h_ceil * width + in_w_ceil];
T v4 = data[in_h_floor * width + in_w_ceil];
T w1 = w_ceil * h_ceil;
T w2 = w_ceil * h_floor;
T w3 = w_floor * h_floor;
T w4 = w_floor * h_ceil;
val[0] = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
}
/**
* Get the source coordinates in the input feature map.
*
* (u, v, w)^matrix = T * (out_w, out_h, 1)^matrix
*
* in_w = u / w
* in_h = v / w
*
*/
template <typename T>
__device__ void get_source_coords(T matrix[], int out_w, int out_h, T* in_w,
T* in_h) {
T u = matrix[0] * out_w + matrix[1] * out_h + matrix[2];
T v = matrix[3] * out_w + matrix[4] * out_h + matrix[5];
T w = matrix[6] * out_w + matrix[7] * out_h + matrix[8];
in_w[0] = u / w;
in_h[0] = v / w;
}
/**
* Get the matrix of perspective transform.
*
* dx1 = x1 - x2
* dx2 = x3 - x2
* dx3 = x0 - x1 + x2 - x3
* dy1 = y1 - y2
* dy2 = y3 - y2
* dy3 = y0 - y1 + y2 - y3
*
* a11 = (x1 - x0 + a31 * (w - 1) * x1) / (w - 1)
* a12 = (x3 - x0 + a32 * (h - 1) * x3) / (h - 1)
* a13 = x0
* a21 = (y1 - y0 + a31 * (w - 1) * y1) / (w - 1)
* a22 = (y3 - y0 + a32 * (h - 1) * y3) / (h - 1)
* a23 = y0
* a31 = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1)
* a32 = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1)
* a33 = 1
*
*/
template <typename T>
__device__ void get_transform_matrix(const int transformed_width,
const int transformed_height, T roi_x[],
T roi_y[], T matrix[]) {
T x0 = roi_x[0];
T x1 = roi_x[1];
T x2 = roi_x[2];
T x3 = roi_x[3];
T y0 = roi_y[0];
T y1 = roi_y[1];
T y2 = roi_y[2];
T y3 = roi_y[3];
// Estimate the height and width of RoI
T len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1));
T len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
T len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3));
T len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0));
T estimated_height = (len2 + len4) / 2.0;
T estimated_width = (len1 + len3) / 2.0;
// Get the normalized height and normalized width
int normalized_height = transformed_height;
int normalized_width =
round(estimated_width * (normalized_height - 1) / estimated_height) + 1;
normalized_width = min(normalized_width, transformed_width);
T dx1 = x1 - x2;
T dx2 = x3 - x2;
T dx3 = x0 - x1 + x2 - x3;
T dy1 = y1 - y2;
T dy2 = y3 - y2;
T dy3 = y0 - y1 + y2 - y3;
matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) /
(normalized_width - 1);
matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) /
(normalized_height - 1);
matrix[8] = 1;
matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) /
(normalized_width - 1);
matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) /
(normalized_height - 1);
matrix[5] = y0;
matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) /
(normalized_width - 1);
matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) /
(normalized_height - 1);
matrix[2] = x0;
}
template <typename T>
__global__ void RoiTransformKernel(const float* input_data,
const float* rois_data,
const int* roi2image_data, int num_rois,
int in_height, int in_width, int channels,
int transformed_height,
int transformed_width, float spatial_scale,
T* output_data) {
int output_size =
num_rois * transformed_height * transformed_width * channels;
CUDA_1D_KERNEL_LOOP(index, output_size) {
// (n, c, out_h, out_w) is an element in the transformed output
int out_w = idx4_4(index, num_rois, channels, transformed_height,
transformed_width);
int out_h = idx4_3(index, num_rois, channels, transformed_height,
transformed_width);
int c = idx4_2(index, num_rois, channels, transformed_height,
transformed_width);
int n = idx4_1(index, num_rois, channels, transformed_height,
transformed_width);
auto bottom_rois = rois_data + n * 8;
int roi_batch_ind = bottom_rois[0];
T roi_x[4];
T roi_y[4];
for (int k = 0; k < 4; ++k) {
roi_x[k] = bottom_rois[2 * k] * spatial_scale;
roi_y[k] = bottom_rois[2 * k + 1] * spatial_scale;
}
// Get transform matrix
T matrix[9];
get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y,
matrix);
// Get source coords
T in_w;
T in_h;
get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h);
if (in_quad<T>(in_w, in_h, roi_x, roi_y)) {
if (GT<T>(-0.5, in_w) || GT<T>(in_w, static_cast<T>(in_width - 0.5)) ||
GT<T>(-0.5, in_h) || GT<T>(in_h, static_cast<T>(in_height - 0.5))) {
// Skip if the source coords are not in the input image
output_data[index] = 0.0;
} else {
// Perform bilinear interpolation
int in_n = roi2image_data[n];
bilinear_interpolate<T>(input_data, channels, in_width, in_height, in_n,
c, in_w, in_h, output_data + index);
}
} else {
// Skip if the source coords are not in the quad
output_data[index] = 0.0;
}
}
}
template <typename T>
class CUDAROIPerspectiveTransformOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out = ctx.Output<framework::Tensor>("Out");
auto transformed_height = ctx.Attr<int>("transformed_height");
auto transformed_width = ctx.Attr<int>("transformed_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int in_height = in_dims[2];
int in_width = in_dims[3];
int rois_num = rois->dims()[0];
const T* input_data = in->data<T>();
T* output_data = out->mutable_data<T>(ctx.GetPlace());
const T* rois_data = rois->data<T>();
framework::Tensor roi2image;
framework::Tensor roi2image_dev;
roi2image.Resize({rois_num});
int* roi2image_data = roi2image.mutable_data<int>(platform::CPUPlace());
auto lod = rois->lod().back();
for (size_t i = 0; i < lod.size() - 1; ++i) {
for (size_t j = lod[i]; j < lod[i + 1]; ++j) {
roi2image_data[j] = i;
}
}
TensorCopySync(roi2image, ctx.GetPlace(), &roi2image_dev);
int out_size = rois_num * transformed_height * transformed_width * channels;
auto stream = ctx.cuda_device_context().stream();
int block = 512;
int grid = (out_size + block - 1) / block;
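    // One thread per output element: ceiling division so the last, partially
    // filled block is still launched; CUDA_1D_KERNEL_LOOP bounds-checks the
    // overshoot.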
hipLaunchKernelGGL(( RoiTransformKernel<T>), dim3(grid), dim3(block), 0, stream,
input_data, rois_data, roi2image_dev.data<int>(), rois_num, in_height,
in_width, channels, transformed_height, transformed_width,
spatial_scale, output_data);
}
};
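/*
 * Return the bilinear-interpolation weight with which input pixel (w, h)
 * contributes to the value sampled at source coordinates (xs, ys). The
 * backward kernel below multiplies the output gradient by this weight to
 * route it back to the input feature map.
 */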
template <typename T>
__device__ T get_feature_gradient(T xs, T ys, int w, int h, const int width,
const int height) {
if (GT<T>(-0.5, xs) || GT<T>(xs, width - 0.5) || GT<T>(-0.5, ys) ||
GT<T>(ys, height - 0.5)) {
return 0;
}
if (GT<T>(0, xs)) {
xs = 0;
}
if (GT<T>(0, ys)) {
ys = 0;
}
int xs_floor = floor(xs);
int ys_floor = floor(ys);
int xs_ceil;
int ys_ceil;
if (GT_E<T>(xs_floor, width - 1)) {
xs_ceil = xs_floor = width - 1;
xs = static_cast<T>(xs_floor);
} else {
xs_ceil = xs_floor + 1;
}
if (GT_E(ys_floor, height - 1)) {
ys_ceil = ys_floor = height - 1;
ys = static_cast<T>(ys_floor);
} else {
ys_ceil = ys_floor + 1;
}
T weight = 0;
if (w == xs_floor) {
if (h == ys_floor) {
weight = (w + 1 - xs) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (w + 1 - xs) * (ys + 1 - h);
}
} else if (w == xs_ceil) {
if (h == ys_floor) {
weight = (xs + 1 - w) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (xs + 1 - w) * (ys + 1 - h);
}
}
return weight;
}
template <typename T>
__global__ void RoiTransformGradKernel(
const size_t* lod, const T* rois_data, int batch_size, int num_rois,
int in_height, int in_width, int channels, int transformed_height,
int transformed_width, float spatial_scale, const T* out_grad_data,
T* in_grad_data) {
int input_size = batch_size * in_height * in_width * channels;
CUDA_1D_KERNEL_LOOP(index, input_size) {
// (n, c, h, w) coords in input
int in_w = idx4_4(index, batch_size, channels, in_height, in_width);
int in_h = idx4_3(index, batch_size, channels, in_height, in_width);
int c = idx4_2(index, batch_size, channels, in_height, in_width);
int n = idx4_1(index, batch_size, channels, in_height, in_width);
T gradient = 0.0;
// Accumulate gradient over all RoIs that interpolated this element
for (size_t roi_idx = lod[n]; roi_idx < lod[n + 1]; ++roi_idx) {
const T* rois = rois_data + roi_idx * 8;
T roi_x[4];
T roi_y[4];
for (int k = 0; k < 4; ++k) {
roi_x[k] = rois[2 * k] * spatial_scale;
roi_y[k] = rois[2 * k + 1] * spatial_scale;
}
// Get transform matrix
T matrix[9];
get_transform_matrix<T>(transformed_width, transformed_height, roi_x,
roi_y, matrix);
const T* out_grad_ptr =
out_grad_data +
(roi_idx * channels + c) * transformed_height * transformed_width;
for (int out_h = 0; out_h < transformed_height; ++out_h) {
for (int out_w = 0; out_w < transformed_width; ++out_w) {
T src_w;
T src_h;
get_source_coords<T>(matrix, out_w, out_h, &src_w, &src_h);
if (in_quad<T>(src_w, src_h, roi_x, roi_y)) {
if (GT<T>(-0.5, src_w) ||
GT<T>(src_w, static_cast<T>(in_width - 0.5)) ||
GT<T>(-0.5, src_h) ||
GT<T>(src_h, static_cast<T>(in_height - 0.5))) {
continue;
}
T weight = get_feature_gradient<T>(src_w, src_h, in_w, in_h,
in_width, in_height);
gradient +=
out_grad_ptr[out_h * transformed_width + out_w] * weight;
}
}
}
}
in_grad_data[index] = gradient;
}
}
template <typename T>
class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto transformed_height = ctx.Attr<int>("transformed_height");
auto transformed_width = ctx.Attr<int>("transformed_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int in_height = in_dims[2];
int in_width = in_dims[3];
int rois_num = rois->dims()[0];
T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const T* rois_data = rois->data<T>();
auto lod = rois->lod().back();
auto lod_data = lod.CUDAData(ctx.GetPlace());
int in_size = in->numel();
auto stream = ctx.cuda_device_context().stream();
int block = 512;
int grid = (in_size + block - 1) / block;
hipLaunchKernelGGL(( RoiTransformGradKernel<T>), dim3(grid), dim3(block), 0, stream,
lod_data, rois_data, batch_size, rois_num, in_height, in_width,
channels, transformed_height, transformed_width, spatial_scale,
out_grad_data, in_grad_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform,
ops::CUDAROIPerspectiveTransformOpKernel<float>);
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform_grad,
ops::CUDAROIPerspectiveTransformGradOpKernel<float>);
| a5e373f4446b13c33e2f5a08a838afb16673e49f.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
using paddle::platform::float16;
namespace paddle {
namespace operators {
// CUDA: index helpers
#define idx4_4(index, d1, d2, d3, d4) (index % d4)
#define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3)
#define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2)
#define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) % d1)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ bool GT_E(T a, T b) {
return (a > b) || Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool LT_E(T a, T b) {
return (a < b) || Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool GT(T a, T b) {
return (a - b) > 1e-4;
}
template <typename T>
__device__ T max(T a, T b) {
return a > b ? a : b;
}
template <typename T>
__device__ T min(T a, T b) {
return a < b ? a : b;
}
/*
* check if (x, y) is in the boundary of roi
*/
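/*
 * The first loop treats points lying exactly on an edge as inside; the
 * second is a horizontal ray-casting test that counts how many edges the
 * ray from (x, y) towards +x crosses; an odd count means the point is
 * inside.
 */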
template <typename T>
__device__ bool in_quad(T x, T y, T roi_x[], T roi_y[]) {
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
if (fabs(y - start_h) < 1e-4 && fabs(y - end_h) < 1e-4 &&
GT_E<T>(x, min<T>(start_w, end_w)) &&
LT_E<T>(x, max<T>(start_w, end_w))) {
return true;
}
} else {
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4 && GT_E(y, min<T>(start_h, end_h)) &&
LT_E<T>(y, max<T>(start_h, end_h))) {
return true;
}
}
}
int n_cross = 0;
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
continue;
}
if (LT_E<T>(y, min<T>(start_h, end_h)) ||
GT<T>(y, max<T>(start_h, end_h))) {
continue;
}
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4) {
return true;
}
if (GT<T>(intersec_x, x)) {
n_cross++;
}
}
return (n_cross % 2 == 1);
}
/**
* Perform bilinear interpolation in the input feature map.
*/
template <typename T>
__device__ void bilinear_interpolate(const T* in_data, const int channels,
const int width, const int height,
int in_n, int in_c, T in_w, T in_h,
T* val) {
// Deal with cases that source coords are out of feature map boundary
if (GT<T>(-0.5, in_w) || GT<T>(in_w, width - 0.5) || GT<T>(-0.5, in_h) ||
GT<T>(in_h, height - 0.5)) {
val[0] = 0.0;
return;
}
if (GT<T>(0, in_w)) {
in_w = 0;
}
if (GT<T>(0, in_h)) {
in_h = 0;
}
int in_w_floor = floor(in_w);
int in_h_floor = floor(in_h);
int in_w_ceil;
int in_h_ceil;
if (GT_E<T>(in_w_floor, width - 1)) {
in_w_ceil = in_w_floor = width - 1;
in_w = static_cast<T>(in_w_floor);
} else {
in_w_ceil = in_w_floor + 1;
}
if (GT_E<T>(in_h_floor, height - 1)) {
in_h_ceil = in_h_floor = height - 1;
in_h = static_cast<T>(in_h_floor);
} else {
in_h_ceil = in_h_floor + 1;
}
T w_floor = in_w - in_w_floor;
T h_floor = in_h - in_h_floor;
T w_ceil = 1 - w_floor;
T h_ceil = 1 - h_floor;
const T* data = in_data + (in_n * channels + in_c) * height * width;
// Do bilinear interpolation
T v1 = data[in_h_floor * width + in_w_floor];
T v2 = data[in_h_ceil * width + in_w_floor];
T v3 = data[in_h_ceil * width + in_w_ceil];
T v4 = data[in_h_floor * width + in_w_ceil];
T w1 = w_ceil * h_ceil;
T w2 = w_ceil * h_floor;
T w3 = w_floor * h_floor;
T w4 = w_floor * h_ceil;
val[0] = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
}
/**
* Get the source coordinates in the input feature map.
*
 * (u, v, w)^T = matrix * (out_w, out_h, 1)^T,
 * where matrix[] holds the 3x3 perspective transform in row-major order.
*
* in_w = u / w
* in_h = v / w
*
*/
template <typename T>
__device__ void get_source_coords(T matrix[], int out_w, int out_h, T* in_w,
T* in_h) {
T u = matrix[0] * out_w + matrix[1] * out_h + matrix[2];
T v = matrix[3] * out_w + matrix[4] * out_h + matrix[5];
T w = matrix[6] * out_w + matrix[7] * out_h + matrix[8];
in_w[0] = u / w;
in_h[0] = v / w;
}
/**
* Get the matrix of perspective transform.
*
* dx1 = x1 - x2
* dx2 = x3 - x2
* dx3 = x0 - x1 + x2 - x3
* dy1 = y1 - y2
* dy2 = y3 - y2
* dy3 = y0 - y1 + y2 - y3
*
* a11 = (x1 - x0 + a31 * (w - 1) * x1) / (w - 1)
* a12 = (x3 - x0 + a32 * (h - 1) * x3) / (h - 1)
* a13 = x0
* a21 = (y1 - y0 + a31 * (w - 1) * y1) / (w - 1)
* a22 = (y3 - y0 + a32 * (h - 1) * y3) / (h - 1)
* a23 = y0
* a31 = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1)
* a32 = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1)
* a33 = 1
*
*/
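//
// Quick sanity check on the formulas above: for an axis-aligned ROI with
// corners (x0,y0)=(0,0), (x1,y1)=(w-1,0), (x2,y2)=(w-1,h-1), (x3,y3)=(0,h-1),
// dx3 = dy3 = 0, so a31 = a32 = 0 and the perspective transform degenerates
// to a plain affine scale-plus-translation.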
template <typename T>
__device__ void get_transform_matrix(const int transformed_width,
const int transformed_height, T roi_x[],
T roi_y[], T matrix[]) {
T x0 = roi_x[0];
T x1 = roi_x[1];
T x2 = roi_x[2];
T x3 = roi_x[3];
T y0 = roi_y[0];
T y1 = roi_y[1];
T y2 = roi_y[2];
T y3 = roi_y[3];
// Estimate the height and width of RoI
T len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1));
T len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
T len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3));
T len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0));
T estimated_height = (len2 + len4) / 2.0;
T estimated_width = (len1 + len3) / 2.0;
// Get the normalized height and normalized width
int normalized_height = transformed_height;
int normalized_width =
round(estimated_width * (normalized_height - 1) / estimated_height) + 1;
normalized_width = min(normalized_width, transformed_width);
T dx1 = x1 - x2;
T dx2 = x3 - x2;
T dx3 = x0 - x1 + x2 - x3;
T dy1 = y1 - y2;
T dy2 = y3 - y2;
T dy3 = y0 - y1 + y2 - y3;
matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) /
(normalized_width - 1);
matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) /
(normalized_height - 1);
matrix[8] = 1;
matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) /
(normalized_width - 1);
matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) /
(normalized_height - 1);
matrix[5] = y0;
matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) /
(normalized_width - 1);
matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) /
(normalized_height - 1);
matrix[2] = x0;
}
template <typename T>
__global__ void RoiTransformKernel(const float* input_data,
const float* rois_data,
const int* roi2image_data, int num_rois,
int in_height, int in_width, int channels,
int transformed_height,
int transformed_width, float spatial_scale,
T* output_data) {
int output_size =
num_rois * transformed_height * transformed_width * channels;
CUDA_1D_KERNEL_LOOP(index, output_size) {
// (n, c, out_h, out_w) is an element in the transformed output
int out_w = idx4_4(index, num_rois, channels, transformed_height,
transformed_width);
int out_h = idx4_3(index, num_rois, channels, transformed_height,
transformed_width);
int c = idx4_2(index, num_rois, channels, transformed_height,
transformed_width);
int n = idx4_1(index, num_rois, channels, transformed_height,
transformed_width);
auto bottom_rois = rois_data + n * 8;
int roi_batch_ind = bottom_rois[0];
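    // Each ROI is stored as 8 values: the (x, y) coordinates of its four
    // corners. Note that roi_batch_ind is not used below; the image index
    // comes from roi2image_data instead.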
T roi_x[4];
T roi_y[4];
for (int k = 0; k < 4; ++k) {
roi_x[k] = bottom_rois[2 * k] * spatial_scale;
roi_y[k] = bottom_rois[2 * k + 1] * spatial_scale;
}
// Get transform matrix
T matrix[9];
get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y,
matrix);
// Get source coords
T in_w;
T in_h;
get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h);
if (in_quad<T>(in_w, in_h, roi_x, roi_y)) {
if (GT<T>(-0.5, in_w) || GT<T>(in_w, static_cast<T>(in_width - 0.5)) ||
GT<T>(-0.5, in_h) || GT<T>(in_h, static_cast<T>(in_height - 0.5))) {
// Skip if source coords is not in input image
output_data[index] = 0.0;
} else {
// Perform bilinear interpolation
int in_n = roi2image_data[n];
bilinear_interpolate<T>(input_data, channels, in_width, in_height, in_n,
c, in_w, in_h, output_data + index);
}
} else {
// Skip if source coords is not in quad
output_data[index] = 0.0;
}
}
}
template <typename T>
class CUDAROIPerspectiveTransformOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out = ctx.Output<framework::Tensor>("Out");
auto transformed_height = ctx.Attr<int>("transformed_height");
auto transformed_width = ctx.Attr<int>("transformed_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int in_height = in_dims[2];
int in_width = in_dims[3];
int rois_num = rois->dims()[0];
const T* input_data = in->data<T>();
T* output_data = out->mutable_data<T>(ctx.GetPlace());
const T* rois_data = rois->data<T>();
framework::Tensor roi2image;
framework::Tensor roi2image_dev;
roi2image.Resize({rois_num});
int* roi2image_data = roi2image.mutable_data<int>(platform::CPUPlace());
auto lod = rois->lod().back();
for (size_t i = 0; i < lod.size() - 1; ++i) {
for (size_t j = lod[i]; j < lod[i + 1]; ++j) {
roi2image_data[j] = i;
}
}
TensorCopySync(roi2image, ctx.GetPlace(), &roi2image_dev);
int out_size = rois_num * transformed_height * transformed_width * channels;
auto stream = ctx.cuda_device_context().stream();
int block = 512;
int grid = (out_size + block - 1) / block;
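    // One thread per output element: ceiling division so the last, partially
    // filled block is still launched; CUDA_1D_KERNEL_LOOP bounds-checks the
    // overshoot.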
RoiTransformKernel<T><<<grid, block, 0, stream>>>(
input_data, rois_data, roi2image_dev.data<int>(), rois_num, in_height,
in_width, channels, transformed_height, transformed_width,
spatial_scale, output_data);
}
};
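/*
 * Return the bilinear-interpolation weight with which input pixel (w, h)
 * contributes to the value sampled at source coordinates (xs, ys). The
 * backward kernel below multiplies the output gradient by this weight to
 * route it back to the input feature map.
 */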
template <typename T>
__device__ T get_feature_gradient(T xs, T ys, int w, int h, const int width,
const int height) {
if (GT<T>(-0.5, xs) || GT<T>(xs, width - 0.5) || GT<T>(-0.5, ys) ||
GT<T>(ys, height - 0.5)) {
return 0;
}
if (GT<T>(0, xs)) {
xs = 0;
}
if (GT<T>(0, ys)) {
ys = 0;
}
int xs_floor = floor(xs);
int ys_floor = floor(ys);
int xs_ceil;
int ys_ceil;
if (GT_E<T>(xs_floor, width - 1)) {
xs_ceil = xs_floor = width - 1;
xs = static_cast<T>(xs_floor);
} else {
xs_ceil = xs_floor + 1;
}
if (GT_E(ys_floor, height - 1)) {
ys_ceil = ys_floor = height - 1;
ys = static_cast<T>(ys_floor);
} else {
ys_ceil = ys_floor + 1;
}
T weight = 0;
if (w == xs_floor) {
if (h == ys_floor) {
weight = (w + 1 - xs) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (w + 1 - xs) * (ys + 1 - h);
}
} else if (w == xs_ceil) {
if (h == ys_floor) {
weight = (xs + 1 - w) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (xs + 1 - w) * (ys + 1 - h);
}
}
return weight;
}
template <typename T>
__global__ void RoiTransformGradKernel(
const size_t* lod, const T* rois_data, int batch_size, int num_rois,
int in_height, int in_width, int channels, int transformed_height,
int transformed_width, float spatial_scale, const T* out_grad_data,
T* in_grad_data) {
int input_size = batch_size * in_height * in_width * channels;
CUDA_1D_KERNEL_LOOP(index, input_size) {
// (n, c, h, w) coords in input
int in_w = idx4_4(index, batch_size, channels, in_height, in_width);
int in_h = idx4_3(index, batch_size, channels, in_height, in_width);
int c = idx4_2(index, batch_size, channels, in_height, in_width);
int n = idx4_1(index, batch_size, channels, in_height, in_width);
T gradient = 0.0;
// Accumulate gradient over all RoIs that interpolated this element
for (size_t roi_idx = lod[n]; roi_idx < lod[n + 1]; ++roi_idx) {
const T* rois = rois_data + roi_idx * 8;
T roi_x[4];
T roi_y[4];
for (int k = 0; k < 4; ++k) {
roi_x[k] = rois[2 * k] * spatial_scale;
roi_y[k] = rois[2 * k + 1] * spatial_scale;
}
// Get transform matrix
T matrix[9];
get_transform_matrix<T>(transformed_width, transformed_height, roi_x,
roi_y, matrix);
const T* out_grad_ptr =
out_grad_data +
(roi_idx * channels + c) * transformed_height * transformed_width;
for (int out_h = 0; out_h < transformed_height; ++out_h) {
for (int out_w = 0; out_w < transformed_width; ++out_w) {
T src_w;
T src_h;
get_source_coords<T>(matrix, out_w, out_h, &src_w, &src_h);
if (in_quad<T>(src_w, src_h, roi_x, roi_y)) {
if (GT<T>(-0.5, src_w) ||
GT<T>(src_w, static_cast<T>(in_width - 0.5)) ||
GT<T>(-0.5, src_h) ||
GT<T>(src_h, static_cast<T>(in_height - 0.5))) {
continue;
}
T weight = get_feature_gradient<T>(src_w, src_h, in_w, in_h,
in_width, in_height);
gradient +=
out_grad_ptr[out_h * transformed_width + out_w] * weight;
}
}
}
}
in_grad_data[index] = gradient;
}
}
template <typename T>
class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto transformed_height = ctx.Attr<int>("transformed_height");
auto transformed_width = ctx.Attr<int>("transformed_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int in_height = in_dims[2];
int in_width = in_dims[3];
int rois_num = rois->dims()[0];
T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const T* rois_data = rois->data<T>();
auto lod = rois->lod().back();
auto lod_data = lod.CUDAData(ctx.GetPlace());
int in_size = in->numel();
auto stream = ctx.cuda_device_context().stream();
int block = 512;
int grid = (in_size + block - 1) / block;
RoiTransformGradKernel<T><<<grid, block, 0, stream>>>(
lod_data, rois_data, batch_size, rois_num, in_height, in_width,
channels, transformed_height, transformed_width, spatial_scale,
out_grad_data, in_grad_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform,
ops::CUDAROIPerspectiveTransformOpKernel<float>);
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform_grad,
ops::CUDAROIPerspectiveTransformGradOpKernel<float>);
|
470bf1c2feb8bd26165786fb47027b0bc38ce8d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <limits>
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/sequence_topk_avg_pooling_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
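// Kernel layout: one thread block per (sequence, channel) pair, with
// blockIdx.x indexing the sequence and blockIdx.y the channel. The block
// first stages its H x W feature map in shared memory; each thread then
// walks its assigned rows, repeatedly extracting the current row maximum,
// adding it to every top-k accumulator whose k has not yet been reached,
// and finally dividing each accumulator by its k to form the average.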
template <typename Dtype>
__global__ void topk_avg_pooling_kernel_by_row_improve(
Dtype *output_data,
const Dtype *input,
const int *gpu_input_offset_l,
const int *gpu_input_offset_r,
const int topk_size,
const int *topks,
const int feat_map_num) {
int row =
gpu_input_offset_l[blockIdx.x + 1] - gpu_input_offset_l[blockIdx.x]; // 8
int col = gpu_input_offset_r[blockIdx.x + 1] -
gpu_input_offset_r[blockIdx.x]; // 30
int max_k = topks[topk_size - 1];
max_k = max_k < col ? max_k : col;
extern __shared__ Dtype smem[]; // H*W
const Dtype *fm_row_in_data = input;
for (int i = 0; i < blockIdx.x; ++i) {
int tmp_row = gpu_input_offset_l[i + 1] - gpu_input_offset_l[i];
int tmp_col = gpu_input_offset_r[i + 1] - gpu_input_offset_r[i];
fm_row_in_data += tmp_row * feat_map_num * tmp_col;
}
fm_row_in_data += blockIdx.y * row * col;
for (int i = threadIdx.x; i < row * col; i += blockDim.x) {
smem[i] = fm_row_in_data[i];
}
__syncthreads();
for (int idx = threadIdx.x; idx < row; idx += blockDim.x) {
Dtype *fm_row_out_data =
output_data +
(gpu_input_offset_l[blockIdx.x] + idx) * feat_map_num * topk_size +
blockIdx.y * topk_size;
Dtype *smem_start_col = smem + idx * col;
int counter = max_k; // topk_size;
Dtype last_max_val = -20000.0;
while (counter) {
Dtype max_val = -10000.0;
int max_pos = 0;
int m = 0;
for (; m < col; m++) {
Dtype cur_data = smem_start_col[m];
if (cur_data > max_val) {
max_val = cur_data;
max_pos = m;
last_max_val = max_val;
}
}
if (max_val < -9999.0) { // == -10000.0
max_val = last_max_val;
}
smem_start_col[max_pos] = -10000000.0;
int i = max_k - counter;
for (int c = 0; c < topk_size; c++) {
if (i <= topks[c] - 1) {
fm_row_out_data[c] += max_val;
}
}
counter--;
}
__syncthreads();
// compute avg
for (int i = 0; i < topk_size; i++) {
fm_row_out_data[i] = fm_row_out_data[i] / topks[i];
}
}
}
template <typename T>
void SequenceTopkAvgPoolingCompute<T>::Run() {
auto ¶m = this->Param<param_t>();
auto &ctx = this->ctx_->template As<CUDAContext>();
auto cuda_stream = ctx.exec_stream();
int topk_num = param.topks.size();
lite::DDim top_ks_shape(std::vector<int64_t>{topk_num, 1, 1, 1});
_top_ks.Resize(top_ks_shape);
hipMemcpyAsync(_top_ks.mutable_data<int>(TARGET(kCUDA)),
¶m.topks[0],
sizeof(int) * topk_num,
hipMemcpyHostToDevice,
cuda_stream);
int width_offset_len = param.COLUMN->lod()[0].size();
lite::DDim width_offset_shape(
std::vector<int64_t>{width_offset_len, 1, 1, 1});
_width_offset.Resize(width_offset_shape);
std::vector<int> width_lod_0(width_offset_len, 0);
for (size_t i = 0; i < param.COLUMN->lod()[0].size(); ++i) {
width_lod_0[i] = static_cast<int>(param.COLUMN->lod()[0][i]);
}
hipMemcpyAsync(_width_offset.mutable_data<int>(TARGET(kCUDA)),
&width_lod_0[0],
sizeof(int) * width_offset_len,
hipMemcpyHostToDevice,
cuda_stream);
int height_offset_len = param.ROW->lod()[0].size();
lite::DDim height_offset_shape(
std::vector<int64_t>{height_offset_len, 1, 1, 1});
_height_offset.Resize(height_offset_shape);
std::vector<int> height_lod_0(height_offset_len, 0);
for (size_t i = 0; i < param.ROW->lod()[0].size(); ++i) {
height_lod_0[i] = static_cast<int>(param.ROW->lod()[0][i]);
}
hipMemcpyAsync(_height_offset.mutable_data<int>(TARGET(kCUDA)),
&height_lod_0[0],
sizeof(int) * height_offset_len,
hipMemcpyHostToDevice,
cuda_stream);
const Tensor *x_tensor = param.X;
Tensor *out_tensor = param.Out;
const T *in_data = x_tensor->data<T>();
T *out_data = out_tensor->mutable_data<T>(TARGET(kCUDA));
TargetWrapperCuda::MemsetAsync(out_tensor->mutable_data<T>(TARGET(kCUDA)),
0,
sizeof(T) * out_tensor->numel(),
cuda_stream);
int num = param.ROW->lod()[0].size() - 1;
int channel = param.channel_num;
const int *height_offset = _height_offset.data<int>();
const int *width_offset = _width_offset.data<int>();
int feat_map_size = 0;
for (size_t i = 0; i < height_lod_0.size() - 1; ++i) {
int height = height_lod_0[i + 1] - height_lod_0[i];
int width = width_lod_0[i + 1] - width_lod_0[i];
if (height * width > feat_map_size) {
feat_map_size = height * width;
}
}
dim3 blocks(num, channel);
dim3 threads(32, 1);
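  // The dynamic shared-memory request below is sized for the largest H*W
  // feature map in the batch (feat_map_size elements), so every block can
  // stage its own map regardless of sequence length.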
  hipLaunchKernelGGL((topk_avg_pooling_kernel_by_row_improve<T>),
                     dim3(blocks), dim3(threads), feat_map_size * sizeof(T),
                     cuda_stream,
out_data,
in_data,
height_offset,
width_offset,
param.topks.size(),
_top_ks.data<int>(),
param.channel_num);
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(ERROR) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(
sequence_topk_avg_pooling,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SequenceTopkAvgPoolingCompute<float>,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("ROW",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("COLUMN",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("pos",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.Finalize();
| 470bf1c2feb8bd26165786fb47027b0bc38ce8d7.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <limits>
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/sequence_topk_avg_pooling_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
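// Kernel layout: one thread block per (sequence, channel) pair, with
// blockIdx.x indexing the sequence and blockIdx.y the channel. The block
// first stages its H x W feature map in shared memory; each thread then
// walks its assigned rows, repeatedly extracting the current row maximum,
// adding it to every top-k accumulator whose k has not yet been reached,
// and finally dividing each accumulator by its k to form the average.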
template <typename Dtype>
__global__ void topk_avg_pooling_kernel_by_row_improve(
Dtype *output_data,
const Dtype *input,
const int *gpu_input_offset_l,
const int *gpu_input_offset_r,
const int topk_size,
const int *topks,
const int feat_map_num) {
int row =
gpu_input_offset_l[blockIdx.x + 1] - gpu_input_offset_l[blockIdx.x]; // 8
int col = gpu_input_offset_r[blockIdx.x + 1] -
gpu_input_offset_r[blockIdx.x]; // 30
int max_k = topks[topk_size - 1];
max_k = max_k < col ? max_k : col;
extern __shared__ Dtype smem[]; // H*W
const Dtype *fm_row_in_data = input;
for (int i = 0; i < blockIdx.x; ++i) {
int tmp_row = gpu_input_offset_l[i + 1] - gpu_input_offset_l[i];
int tmp_col = gpu_input_offset_r[i + 1] - gpu_input_offset_r[i];
fm_row_in_data += tmp_row * feat_map_num * tmp_col;
}
fm_row_in_data += blockIdx.y * row * col;
for (int i = threadIdx.x; i < row * col; i += blockDim.x) {
smem[i] = fm_row_in_data[i];
}
__syncthreads();
for (int idx = threadIdx.x; idx < row; idx += blockDim.x) {
Dtype *fm_row_out_data =
output_data +
(gpu_input_offset_l[blockIdx.x] + idx) * feat_map_num * topk_size +
blockIdx.y * topk_size;
Dtype *smem_start_col = smem + idx * col;
int counter = max_k; // topk_size;
Dtype last_max_val = -20000.0;
while (counter) {
Dtype max_val = -10000.0;
int max_pos = 0;
int m = 0;
for (; m < col; m++) {
Dtype cur_data = smem_start_col[m];
if (cur_data > max_val) {
max_val = cur_data;
max_pos = m;
last_max_val = max_val;
}
}
if (max_val < -9999.0) { // == -10000.0
max_val = last_max_val;
}
smem_start_col[max_pos] = -10000000.0;
int i = max_k - counter;
for (int c = 0; c < topk_size; c++) {
if (i <= topks[c] - 1) {
fm_row_out_data[c] += max_val;
}
}
counter--;
}
__syncthreads();
// compute avg
for (int i = 0; i < topk_size; i++) {
fm_row_out_data[i] = fm_row_out_data[i] / topks[i];
}
}
}
template <typename T>
void SequenceTopkAvgPoolingCompute<T>::Run() {
auto ¶m = this->Param<param_t>();
auto &ctx = this->ctx_->template As<CUDAContext>();
auto cuda_stream = ctx.exec_stream();
int topk_num = param.topks.size();
lite::DDim top_ks_shape(std::vector<int64_t>{topk_num, 1, 1, 1});
_top_ks.Resize(top_ks_shape);
cudaMemcpyAsync(_top_ks.mutable_data<int>(TARGET(kCUDA)),
¶m.topks[0],
sizeof(int) * topk_num,
cudaMemcpyHostToDevice,
cuda_stream);
int width_offset_len = param.COLUMN->lod()[0].size();
lite::DDim width_offset_shape(
std::vector<int64_t>{width_offset_len, 1, 1, 1});
_width_offset.Resize(width_offset_shape);
std::vector<int> width_lod_0(width_offset_len, 0);
for (size_t i = 0; i < param.COLUMN->lod()[0].size(); ++i) {
width_lod_0[i] = static_cast<int>(param.COLUMN->lod()[0][i]);
}
cudaMemcpyAsync(_width_offset.mutable_data<int>(TARGET(kCUDA)),
&width_lod_0[0],
sizeof(int) * width_offset_len,
cudaMemcpyHostToDevice,
cuda_stream);
int height_offset_len = param.ROW->lod()[0].size();
lite::DDim height_offset_shape(
std::vector<int64_t>{height_offset_len, 1, 1, 1});
_height_offset.Resize(height_offset_shape);
std::vector<int> height_lod_0(height_offset_len, 0);
for (size_t i = 0; i < param.ROW->lod()[0].size(); ++i) {
height_lod_0[i] = static_cast<int>(param.ROW->lod()[0][i]);
}
cudaMemcpyAsync(_height_offset.mutable_data<int>(TARGET(kCUDA)),
&height_lod_0[0],
sizeof(int) * height_offset_len,
cudaMemcpyHostToDevice,
cuda_stream);
const Tensor *x_tensor = param.X;
Tensor *out_tensor = param.Out;
const T *in_data = x_tensor->data<T>();
T *out_data = out_tensor->mutable_data<T>(TARGET(kCUDA));
TargetWrapperCuda::MemsetAsync(out_tensor->mutable_data<T>(TARGET(kCUDA)),
0,
sizeof(T) * out_tensor->numel(),
cuda_stream);
int num = param.ROW->lod()[0].size() - 1;
int channel = param.channel_num;
const int *height_offset = _height_offset.data<int>();
const int *width_offset = _width_offset.data<int>();
int feat_map_size = 0;
for (size_t i = 0; i < height_lod_0.size() - 1; ++i) {
int height = height_lod_0[i + 1] - height_lod_0[i];
int width = width_lod_0[i + 1] - width_lod_0[i];
if (height * width > feat_map_size) {
feat_map_size = height * width;
}
}
dim3 blocks(num, channel);
dim3 threads(32, 1);
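  // The dynamic shared-memory request below is sized for the largest H*W
  // feature map in the batch (feat_map_size elements), so every block can
  // stage its own map regardless of sequence length.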
topk_avg_pooling_kernel_by_row_improve<
T><<<blocks, threads, feat_map_size * sizeof(T), cuda_stream>>>(
out_data,
in_data,
height_offset,
width_offset,
param.topks.size(),
_top_ks.data<int>(),
param.channel_num);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(ERROR) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(
sequence_topk_avg_pooling,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SequenceTopkAvgPoolingCompute<float>,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("ROW",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("COLUMN",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("pos",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.Finalize();
|
93ff438ef4eb2f0e2896f6b17909ca2e73667262.hip | // !!! This is a file automatically generated by hipify!!!
/*
BGR0 to YUV converter
Known issue: the converted output currently shows artifacts and its md5sum changes between runs.
*/
// System includes
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdint.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//#include <helper_cuda.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void bgr0_to_nv12_pixel(unsigned char *dinput, unsigned char *doutput, int cols, int rows) {
//int i = threadIdx.x;
//int j = threadIdx.y;
//char pixel_data[4];
int col_num = blockIdx.x*blockDim.x+threadIdx.x;
int row_num = blockIdx.y*blockDim.y+threadIdx.y;
if ((row_num < rows) && (col_num < cols))
{
//int global_offset = blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
int global_offset = row_num*cols+col_num;
//char pixel_data[4];
//memcpy(&pixel_data,dinput+global_offset*4,4);
int r,g,b;
r = dinput[global_offset*4+2];
g = dinput[global_offset*4+1];
b = dinput[global_offset*4+0];
//r = pixel_data[2];
//g = pixel_data[1];
//b = pixel_data[0];
doutput[global_offset] = ((66*r + 129*g + 25*b) >> 8) + 16;
if(((threadIdx.x % 2) == 0) and ((threadIdx.y % 2) == 0)){
doutput[cols*rows+row_num*cols/2+col_num+1] = ((112*r + -94*g + -18*b) >> 8) + 128;
doutput[cols*rows+row_num*cols/2+col_num] = ((-38*r + -74*g + 112*b) >> 8) + 128;
}
}
}
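/*
 * Note on the uint variant below: for little-endian BGR0 the low byte of the
 * packed uint32 is blue, so its r/g/b variables actually pick up B/G/R rather
 * than R/G/B as in the byte-indexed kernel above, and it writes the V-like
 * value at the even offset where NV12 expects U. These discrepancies are the
 * likely cause of the artifacts mentioned in the file header.
 */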
__global__ void bgr0_to_nv12_pixel_uint(unsigned char *dinput, unsigned char *doutput, int cols, int rows) {
int col_num = blockIdx.x*blockDim.x+threadIdx.x;
int row_num = blockIdx.y*blockDim.y+threadIdx.y;
if ((row_num < rows) && (col_num < cols))
{
//int global_offset = blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
int global_offset = row_num*cols+col_num;
uint32_t a = *((uint32_t *)&dinput[global_offset*4]);
int r,g,b;
r = a & 0xff;
g = ( a >> 8 ) & 0xff;
b = ( a >> 16 ) & 0xff;
doutput[global_offset] = ((66*r + 129*g + 25*b) >> 8) + 16;
if(((threadIdx.x & 1) == 0) and ((threadIdx.y & 1) == 0)){
int uv_offset = cols*rows+((row_num*cols)>>1)+col_num;
doutput[uv_offset] = ((112*r + -94*g + -18*b) >> 8) + 128;
doutput[uv_offset+1] = ((-38*r + -74*g + 112*b) >> 8) + 128;
}
}
}
struct cuda_memory_struct
{
uint8_t *dinput;
uint8_t *doutput;
};
extern "C" void * cuda_memory_init(int width,int height){
int pan_size = width*height*4;
int pan_size_nv12 = width*height/2*3;
cuda_memory_struct * cuda_memory_ptr = (cuda_memory_struct * )malloc(sizeof(cuda_memory_struct));
//cuda_memory_ptr.dinput =
clock_t end, start;
float seconds;
start = clock();
hipMalloc((void **)&cuda_memory_ptr->dinput, sizeof(char)*pan_size );
hipMalloc((void **)&cuda_memory_ptr->doutput, sizeof(char)*pan_size_nv12 );
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Device malloc: %fs\n",seconds);
return cuda_memory_ptr;
}
extern "C" int cuda_br0_to_nv12(cuda_memory_struct * cuda_memory_ptr,uint8_t * bgr0,uint8_t * nv12, int width, int height)
{
int pan_size = width*height*4;
int pan_size_nv12 = width*height/2*3;
clock_t end, start;
float seconds;
uint8_t *dinput = NULL;
uint8_t *doutput = NULL;
dinput = cuda_memory_ptr->dinput;
doutput = cuda_memory_ptr->doutput;
start = clock();
hipMemcpy(dinput, bgr0, sizeof(unsigned char)*pan_size, hipMemcpyHostToDevice);
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Memory to device copy took: %fs\n",seconds);
int block_width = 32;
int block_height = 8;
int x = width/block_width;
int y = height/block_height;
const dim3 numBlocks (x, y, 1); // number of blocks
const dim3 threadsPerBlock(block_width, block_height, 1);
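  // Note: integer division above, so a width that is not a multiple of 32 (or
  // a height not a multiple of 8) leaves the right/bottom edge unprocessed; a
  // ceiling division such as (width + block_width - 1) / block_width would
  // cover it, relying on the kernel's existing bounds check.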
start = clock();
hipLaunchKernelGGL(( bgr0_to_nv12_pixel_uint), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dinput, doutput, width, height);
gpuErrchk( hipPeekAtLastError() );
hipDeviceSynchronize();
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Convertation using uint took: %fs\n",seconds);
start = clock();
hipMemcpy( nv12, doutput, sizeof(unsigned char)*pan_size_nv12, hipMemcpyDeviceToHost);
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Memory to host copy took: %fs\n",seconds);
return 0;
}
| 93ff438ef4eb2f0e2896f6b17909ca2e73667262.cu | /*
BGR0 to YUV converter
I have artifacts and changing md5sum now.
*/
// System includes
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdint.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//#include <helper_cuda.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void bgr0_to_nv12_pixel(unsigned char *dinput, unsigned char *doutput, int cols, int rows) {
//int i = threadIdx.x;
//int j = threadIdx.y;
//char pixel_data[4];
int col_num = blockIdx.x*blockDim.x+threadIdx.x;
int row_num = blockIdx.y*blockDim.y+threadIdx.y;
if ((row_num < rows) && (col_num < cols))
{
//int global_offset = blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
int global_offset = row_num*cols+col_num;
//char pixel_data[4];
//memcpy(&pixel_data,dinput+global_offset*4,4);
int r,g,b;
r = dinput[global_offset*4+2];
g = dinput[global_offset*4+1];
b = dinput[global_offset*4+0];
//r = pixel_data[2];
//g = pixel_data[1];
//b = pixel_data[0];
doutput[global_offset] = ((66*r + 129*g + 25*b) >> 8) + 16;
if(((threadIdx.x % 2) == 0) and ((threadIdx.y % 2) == 0)){
doutput[cols*rows+row_num*cols/2+col_num+1] = ((112*r + -94*g + -18*b) >> 8) + 128;
doutput[cols*rows+row_num*cols/2+col_num] = ((-38*r + -74*g + 112*b) >> 8) + 128;
}
}
}
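/*
 * Note on the uint variant below: for little-endian BGR0 the low byte of the
 * packed uint32 is blue, so its r/g/b variables actually pick up B/G/R rather
 * than R/G/B as in the byte-indexed kernel above, and it writes the V-like
 * value at the even offset where NV12 expects U. These discrepancies are the
 * likely cause of the artifacts mentioned in the file header.
 */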
__global__ void bgr0_to_nv12_pixel_uint(unsigned char *dinput, unsigned char *doutput, int cols, int rows) {
int col_num = blockIdx.x*blockDim.x+threadIdx.x;
int row_num = blockIdx.y*blockDim.y+threadIdx.y;
if ((row_num < rows) && (col_num < cols))
{
//int global_offset = blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
int global_offset = row_num*cols+col_num;
uint32_t a = *((uint32_t *)&dinput[global_offset*4]);
int r,g,b;
r = a & 0xff;
g = ( a >> 8 ) & 0xff;
b = ( a >> 16 ) & 0xff;
doutput[global_offset] = ((66*r + 129*g + 25*b) >> 8) + 16;
if(((threadIdx.x & 1) == 0) and ((threadIdx.y & 1) == 0)){
int uv_offset = cols*rows+((row_num*cols)>>1)+col_num;
doutput[uv_offset] = ((112*r + -94*g + -18*b) >> 8) + 128;
doutput[uv_offset+1] = ((-38*r + -74*g + 112*b) >> 8) + 128;
}
}
}
struct cuda_memory_struct
{
uint8_t *dinput;
uint8_t *doutput;
};
extern "C" void * cuda_memory_init(int width,int height){
int pan_size = width*height*4;
int pan_size_nv12 = width*height/2*3;
cuda_memory_struct * cuda_memory_ptr = (cuda_memory_struct * )malloc(sizeof(cuda_memory_struct));
//cuda_memory_ptr.dinput =
clock_t end, start;
float seconds;
start = clock();
cudaMalloc((void **)&cuda_memory_ptr->dinput, sizeof(char)*pan_size );
cudaMalloc((void **)&cuda_memory_ptr->doutput, sizeof(char)*pan_size_nv12 );
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Device malloc: %fs\n",seconds);
return cuda_memory_ptr;
}
extern "C" int cuda_br0_to_nv12(cuda_memory_struct * cuda_memory_ptr,uint8_t * bgr0,uint8_t * nv12, int width, int height)
{
int pan_size = width*height*4;
int pan_size_nv12 = width*height/2*3;
clock_t end, start;
float seconds;
uint8_t *dinput = NULL;
uint8_t *doutput = NULL;
dinput = cuda_memory_ptr->dinput;
doutput = cuda_memory_ptr->doutput;
start = clock();
cudaMemcpy(dinput, bgr0, sizeof(unsigned char)*pan_size, cudaMemcpyHostToDevice);
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Memory to device copy took: %fs\n",seconds);
int block_width = 32;
int block_height = 8;
int x = width/block_width;
int y = height/block_height;
const dim3 numBlocks (x, y, 1); // number of blocks
const dim3 threadsPerBlock(block_width, block_height, 1);
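  // Note: integer division above, so a width that is not a multiple of 32 (or
  // a height not a multiple of 8) leaves the right/bottom edge unprocessed; a
  // ceiling division such as (width + block_width - 1) / block_width would
  // cover it, relying on the kernel's existing bounds check.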
start = clock();
bgr0_to_nv12_pixel_uint<<<numBlocks, threadsPerBlock>>>(dinput, doutput, width, height);
gpuErrchk( cudaPeekAtLastError() );
cudaDeviceSynchronize();
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Convertation using uint took: %fs\n",seconds);
start = clock();
cudaMemcpy( nv12, doutput, sizeof(unsigned char)*pan_size_nv12, cudaMemcpyDeviceToHost);
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Memory to host copy took: %fs\n",seconds);
return 0;
}
|
9c73f018bed546b68c2d28fa2ef3896d98b4b054.hip | // !!! This is a file automatically generated by hipify!!!
/// LSU EE 7722 GPU Microarchitecture
//
/// Homework 1 SOLUTION - Spring 2016
//
// Assignment: http://www.ece.lsu.edu/koppel/gp/2016/hw01.pdf
// Solution : http://www.ece.lsu.edu/koppel/gp/2016/hw01_sol.pdf
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <new>
#include <hip/hip_runtime.h>
#include <assert.h>
#include "util.h"
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
struct App
{
int array_size;
// Host pointers to the input and output arrays, and to CPU-computed
// output arrays used for checking results.
//
Elt_Type *h_in, *h_out, *h_out_check;
Elt_Type *h_out_sc_check;
/// Problem 3 -- This might come in handy.
int mask;
// GPU pointers to the input and output arrays.
//
Elt_Type *d_in, *d_out;
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
extern "C" __global__ void
lane_aligned()
{
/// Problem 1 -- Put solution in this routine.
/// SOLUTION Problem 1
//
// Round the block size down to the largest multiple of 32, and
// use that "useful" block size instead of the real one.
// SOLUTION: Round block dim to largest multiple of 32 <= blockDim.x
//
const int useful_block_dim = blockDim.x & ~ 0x1f;
// SOLUTION: Use useful_block_dim to compute number of threads
//
const int useful_num_threads = useful_block_dim * gridDim.x;
// SOLUTION: Just return if we are beyond the useful part.
//
if ( threadIdx.x >= useful_block_dim ) return;
const int tid = threadIdx.x + blockIdx.x * useful_block_dim;
const int lane = threadIdx.x & 0x1f;
const int start = tid;
const int stop = d_app.array_size;
// Encode lane number so that it can be written to output array
// elements. The CPU code will use this to check whether any
// partial warps executed. Please don't change the line below,
// that would be lying to the CPU code.
//
const Elt_Type lane_label = 1e-5 * lane;
for ( int i = start; i < stop; i += useful_num_threads )
d_app.d_out[i] = d_app.d_in[i] + lane_label;
}
/// SOLUTION -- Problem 3
//
// Increase number of iterations from 2 to 16 so that computation
// time is a significant portion of execution time.
// Number of iterations used in scheduler_bm.
//
const int iters = 16;
//
// It is important that this value is a compile-time constant.
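//
// (A literal constant trip count lets the compiler unroll the __sinf loop
// in scheduler_bm, so the measured time is not polluted by loop overhead.)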
extern "C" __global__ void
scheduler_bm()
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid;
const int stop = d_app.array_size;
const int inc = num_threads;
for ( int i = start; i < stop; i += inc )
{
Elt_Type accum = d_app.d_in[i];
// SOLUTION -- Problem 3
//
// Use mask to suppress execution of certain threads.
//
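      // For example, mask 0x1 idles the odd lanes inside every warp
      // (intra-warp divergence), while mask 0x20 idles entire alternate
      // warps, so timing the two cases separates divergence cost from
      // whole-warp idling.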
if ( ( tid & d_app.mask ) == 0 )
for ( int j=0; j<iters; j++ ) accum = __sinf(accum);
d_app.d_out[i] = accum;
}
}
GPU_Info
print_gpu_and_kernel_info()
{
GPU_Info info;
print_gpu_info();
// Choose GPU 0 because it's usually the better choice.
//
int dev = 0;
CE(hipSetDevice(dev));
printf("Using GPU %d\n",dev);
info.get_gpu_info(dev);
info.GET_INFO(lane_aligned);
info.GET_INFO(scheduler_bm);
// Print information about kernel.
//
printf("\nCUDA Kernel Resource Usage:\n");
for ( int i=0; i<info.num_kernels; i++ )
{
printf("For %s:\n", info.ki[i].name);
printf(" %6zd shared, %zd const, %zd loc, %d regs; "
"%d max threads per block.\n",
info.ki[i].cfa.sharedSizeBytes,
info.ki[i].cfa.constSizeBytes,
info.ki[i].cfa.localSizeBytes,
info.ki[i].cfa.numRegs,
info.ki[i].cfa.maxThreadsPerBlock);
}
return info;
}
int
main(int argc, char **argv)
{
// Get info about GPU and each kernel.
//
GPU_Info info = print_gpu_and_kernel_info();
const int num_mp = info.cuda_prop.multiProcessorCount;
// Examine argument 1, block count. Default is number of MPs.
//
const int arg1_int = argc < 2 ? num_mp : atoi(argv[1]);
const int num_blocks =
arg1_int == 0 ? num_mp :
arg1_int < 0 ? -arg1_int * num_mp : arg1_int;
// Examine argument 2, number of threads per block.
//
const int thd_per_block_arg = argc < 3 ? 1024 : atoi(argv[2]);
const int thd_per_block_goal =
thd_per_block_arg == 0 ? 1024 : thd_per_block_arg;
const int num_threads = num_blocks * thd_per_block_goal;
const bool vary_warps = thd_per_block_arg == 0;
// Examine argument 3, size of array in MiB. Fractional values okay.
//
app.array_size = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) );
if ( num_threads <= 0 || app.array_size <= 0 )
{
printf("Usage: %s [ NUM_CUDA_BLOCKS | -BLOCKS_PER_MP ] [THD_PER_BLOCK] "
"[DATA_SIZE_MiB]\n",
argv[0]);
exit(1);
}
/// SOLUTION -- Problem 3. Get mask value from the command line.
//
app.mask = argc < 5 ? 0 : strtol(argv[4],NULL,0);
const int in_size_bytes = app.array_size * sizeof( app.h_in[0] );
const int out_size_bytes = app.array_size * sizeof( app.h_out[0] );
// Allocate storage for CPU copy of data.
//
app.h_in = new Elt_Type[ app.array_size ];
app.h_out = new Elt_Type[ app.array_size ];
app.h_out_check = new Elt_Type[ app.array_size ];
app.h_out_sc_check = new Elt_Type[ app.array_size ];
// Allocate storage for GPU copy of data.
//
CE( hipMalloc( &app.d_in, in_size_bytes ) );
CE( hipMalloc( &app.d_out, out_size_bytes ) );
// Initialize input array and arrays holding correct answers.
//
for ( int i=0; i<app.array_size; i++ )
{
app.h_in[i] = i + 0.01 * ( i & 0x1f );
app.h_out[i] = 0;
app.h_out_check[i] = app.h_in[i] + 0.00001 * ( i & 0x1f );
Elt_Type accum = app.h_in[i];
for ( int j=0; j<iters; j++ ) accum = sin(accum);
// The NVIDIA hardware sin is not accurate for larger values,
// so use a -2 to indicate that GPU output should not be checked
// at this element.
//
app.h_out_sc_check[i] = i < 100000 ? accum : -2;
}
// Amount of data in and out of GPU chip.
const int amt_data_bytes = in_size_bytes + out_size_bytes;
double elapsed_time_s = 86400; // Reassigned to minimum run time.
{
// Prepare events used for timing.
//
hipEvent_t gpu_start_ce, gpu_stop_ce;
CE(hipEventCreate(&gpu_start_ce));
CE(hipEventCreate(&gpu_stop_ce));
// Copy input array from CPU to GPU.
//
CE( hipMemcpy
( app.d_in, app.h_in, in_size_bytes, hipMemcpyHostToDevice ) );
// Copy App structure to GPU.
//
CE( hipMemcpyToSymbol
( d_app, &app, sizeof(app), 0, hipMemcpyHostToDevice ) );
// Launch kernel multiple times and keep track of the best time.
printf("\nLaunching with %d blocks of up to %d threads for %d elts "
"and mask %#x.\n",
num_blocks, thd_per_block_goal, app.array_size, app.mask);
for ( int kernel = 0; kernel < info.num_kernels; kernel++ )
{
hipFuncAttributes& cfa = info.ki[kernel].cfa;
const int wp_limit = cfa.maxThreadsPerBlock >> 5;
const int thd_limit = wp_limit << 5;
const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit);
const int wp_start = 4;
const int wp_stop = vary_warps ? wp_limit : wp_start;
const int wp_inc = 4;
for ( int wp_cnt = wp_start; wp_cnt <= wp_stop; wp_cnt += wp_inc )
{
const int thd_per_block =
vary_warps ? wp_cnt << 5 : thd_per_block_no_vary;
// Zero the output array.
//
CE( hipMemset(app.d_out,0,out_size_bytes) );
          // Measure execution time starting "now", which is after the input
          // data has been copied to the GPU.
//
CE(hipEventRecord(gpu_start_ce,0));
// Launch Kernel
//
          hipLaunchKernelGGL(info.ki[kernel].func_ptr,
                             dim3(num_blocks), dim3(thd_per_block), 0, 0);
//
// Confused?
//
// info.ki[kernel].func_ptr holds a pointer to the kernel
// routine.
          // Stop measuring execution time now, which is before the data is
          // returned from the GPU.
//
CE(hipEventRecord(gpu_stop_ce,0));
CE(hipEventSynchronize(gpu_stop_ce));
float cuda_time_ms = -1.1;
CE(hipEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
const double this_elapsed_time_s = cuda_time_ms * 0.001;
const double thpt_data_gbps =
amt_data_bytes / this_elapsed_time_s * 1e-9;
if ( vary_warps )
{
const char* const stars = "********************************************************************************";
const int stars_len = 80;
const double bw_frac =
1e9 * thpt_data_gbps / info.chip_bw_Bps;
const int max_st_len = 52; // Maximum stars length.
// Number of warps, rounded up.
//
const int wps = ( thd_per_block + 31 ) >> 5;
/// Problem 2 Solution Goes Around Here
// The maximum number of active blocks per MP for this
// kernel when launched with a block size of thd_per_block.
//
const int max_bl_per_mp =
info.get_max_active_blocks_per_mp(kernel,thd_per_block);
/// Problem 2: Assign appropriate value.
/// SOLUTION - Problem 2
//
// Compute number of blocks available per MP based only on
// the number of blocks. This may be larger than the
// number of blocks that can run.
//
const int bl_per_mp_available =
0.999 + double(num_blocks) / num_mp;
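              // (Adding 0.999 before the implicit truncation to int
              // effectively rounds the blocks-per-MP ratio up to the next
              // whole number.)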
// The number of active blocks is the minimum of what
// can fit and how many are available.
//
const int bl_per_mp =
min( bl_per_mp_available, max_bl_per_mp );
              // Based on the number of blocks, compute the number of warps.
//
const int act_wps = wps * bl_per_mp;
if ( wp_cnt == wp_start )
printf("Kernel %s:\n", info.ki[kernel].name);
printf("%2d wp %2d acwp %3.0f s %#4x %5.0f GB/s %s\n",
(thd_per_block + 31 ) >> 5,
act_wps,
this_elapsed_time_s * 1e6,
app.mask,
thpt_data_gbps,
&stars[stars_len-int(bw_frac*max_st_len)]
);
} else {
printf("K %-15s %2d wp %11.3f s %8.3f GB/s\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_data_gbps);
}
elapsed_time_s = min(this_elapsed_time_s,elapsed_time_s);
// Copy output array from GPU to CPU.
//
CE( hipMemcpy
( app.h_out, app.d_out, out_size_bytes,
hipMemcpyDeviceToHost) );
int err_count = 0;
Elt_Type* const out_check =
kernel == 0 ? app.h_out_check : app.h_out_sc_check;
const double tolerance = kernel == 0 ? 1e-5 : 1e-2;
for ( int i=0; i<app.array_size; i++ )
{
if ( out_check[i] == -2 ) continue; // Don't check.
/// SOLUTION -- Problem 3
//
// Skip correctness check if sine execution was
// suppressed by the mask.
//
if ( kernel == 1 && i & app.mask ) continue;
if ( fabs( out_check[i] - app.h_out[i] ) > tolerance )
{
err_count++;
if ( err_count < 5 )
printf("Error at elt %#x: %.7f != %.7f (correct)\n",
i, app.h_out[i], out_check[i] );
}
}
if ( err_count )
printf("Total errors %d\n", err_count);
}
}
}
}
| 9c73f018bed546b68c2d28fa2ef3896d98b4b054.cu | /// LSU EE 7722 GPU Microarchitecture
//
/// Homework 1 SOLUTION - Spring 2016
//
// Assignment: http://www.ece.lsu.edu/koppel/gp/2016/hw01.pdf
// Solution : http://www.ece.lsu.edu/koppel/gp/2016/hw01_sol.pdf
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <new>
#include <cuda_runtime.h>
#include <assert.h>
#include "util.h"
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
struct App
{
int array_size;
// Host pointers to the input and output arrays, and to CPU-computed
// output arrays used for checking results.
//
Elt_Type *h_in, *h_out, *h_out_check;
Elt_Type *h_out_sc_check;
/// Problem 3 -- This might come in handy.
int mask;
// GPU pointers to the input and output arrays.
//
Elt_Type *d_in, *d_out;
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
extern "C" __global__ void
lane_aligned()
{
/// Problem 1 -- Put solution in this routine.
/// SOLUTION Problem 1
//
// Round the block size down to the largest multiple of 32, and
// use that "useful" block size instead of the real one.
// SOLUTION: Round block dim to largest multiple of 32 <= blockDim.x
//
const int useful_block_dim = blockDim.x & ~ 0x1f;
// SOLUTION: Use useful_block_dim to compute number of threads
//
const int useful_num_threads = useful_block_dim * gridDim.x;
// SOLUTION: Just return if we are beyond the useful part.
//
if ( threadIdx.x >= useful_block_dim ) return;
const int tid = threadIdx.x + blockIdx.x * useful_block_dim;
const int lane = threadIdx.x & 0x1f;
const int start = tid;
const int stop = d_app.array_size;
// Encode lane number so that it can be written to output array
// elements. The CPU code will use this to check whether any
// partial warps executed. Please don't change the line below,
// that would be lying to the CPU code.
//
const Elt_Type lane_label = 1e-5 * lane;
for ( int i = start; i < stop; i += useful_num_threads )
d_app.d_out[i] = d_app.d_in[i] + lane_label;
}
/// SOLUTION -- Problem 3
//
// Increase number of iterations from 2 to 16 so that computation
// time is a significant portion of execution time.
// Number of iterations used in scheduler_bm.
//
const int iters = 16;
//
// It is important that this value is a compile-time constant.
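//
// (A literal constant trip count lets the compiler unroll the __sinf loop
// in scheduler_bm, so the measured time is not polluted by loop overhead.)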
extern "C" __global__ void
scheduler_bm()
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid;
const int stop = d_app.array_size;
const int inc = num_threads;
for ( int i = start; i < stop; i += inc )
{
Elt_Type accum = d_app.d_in[i];
// SOLUTION -- Problem 3
//
// Use mask to suppress execution of certain threads.
//
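      // For example, mask 0x1 idles the odd lanes inside every warp
      // (intra-warp divergence), while mask 0x20 idles entire alternate
      // warps, so timing the two cases separates divergence cost from
      // whole-warp idling.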
if ( ( tid & d_app.mask ) == 0 )
for ( int j=0; j<iters; j++ ) accum = __sinf(accum);
d_app.d_out[i] = accum;
}
}
GPU_Info
print_gpu_and_kernel_info()
{
GPU_Info info;
print_gpu_info();
// Choose GPU 0 because it's usually the better choice.
//
int dev = 0;
CE(cudaSetDevice(dev));
printf("Using GPU %d\n",dev);
info.get_gpu_info(dev);
info.GET_INFO(lane_aligned);
info.GET_INFO(scheduler_bm);
// Print information about kernel.
//
printf("\nCUDA Kernel Resource Usage:\n");
for ( int i=0; i<info.num_kernels; i++ )
{
printf("For %s:\n", info.ki[i].name);
printf(" %6zd shared, %zd const, %zd loc, %d regs; "
"%d max threads per block.\n",
info.ki[i].cfa.sharedSizeBytes,
info.ki[i].cfa.constSizeBytes,
info.ki[i].cfa.localSizeBytes,
info.ki[i].cfa.numRegs,
info.ki[i].cfa.maxThreadsPerBlock);
}
return info;
}
int
main(int argc, char **argv)
{
// Get info about GPU and each kernel.
//
GPU_Info info = print_gpu_and_kernel_info();
const int num_mp = info.cuda_prop.multiProcessorCount;
// Examine argument 1, block count. Default is number of MPs.
//
const int arg1_int = argc < 2 ? num_mp : atoi(argv[1]);
const int num_blocks =
arg1_int == 0 ? num_mp :
arg1_int < 0 ? -arg1_int * num_mp : arg1_int;
// Examine argument 2, number of threads per block.
//
const int thd_per_block_arg = argc < 3 ? 1024 : atoi(argv[2]);
const int thd_per_block_goal =
thd_per_block_arg == 0 ? 1024 : thd_per_block_arg;
const int num_threads = num_blocks * thd_per_block_goal;
const bool vary_warps = thd_per_block_arg == 0;
// Examine argument 3, size of array in MiB. Fractional values okay.
//
app.array_size = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) );
if ( num_threads <= 0 || app.array_size <= 0 )
{
printf("Usage: %s [ NUM_CUDA_BLOCKS | -BLOCKS_PER_MP ] [THD_PER_BLOCK] "
"[DATA_SIZE_MiB]\n",
argv[0]);
exit(1);
}
/// SOLUTION -- Problem 3. Get mask value from the command line.
//
app.mask = argc < 5 ? 0 : strtol(argv[4],NULL,0);
const int in_size_bytes = app.array_size * sizeof( app.h_in[0] );
const int out_size_bytes = app.array_size * sizeof( app.h_out[0] );
// Allocate storage for CPU copy of data.
//
app.h_in = new Elt_Type[ app.array_size ];
app.h_out = new Elt_Type[ app.array_size ];
app.h_out_check = new Elt_Type[ app.array_size ];
app.h_out_sc_check = new Elt_Type[ app.array_size ];
// Allocate storage for GPU copy of data.
//
CE( cudaMalloc( &app.d_in, in_size_bytes ) );
CE( cudaMalloc( &app.d_out, out_size_bytes ) );
// Initialize input array and arrays holding correct answers.
//
for ( int i=0; i<app.array_size; i++ )
{
app.h_in[i] = i + 0.01 * ( i & 0x1f );
app.h_out[i] = 0;
app.h_out_check[i] = app.h_in[i] + 0.00001 * ( i & 0x1f );
Elt_Type accum = app.h_in[i];
for ( int j=0; j<iters; j++ ) accum = sin(accum);
// The NVIDIA hardware sin is not accurate for larger values,
// so use a -2 to indicate that GPU output should not be checked
// at this element.
//
app.h_out_sc_check[i] = i < 100000 ? accum : -2;
}
// Amount of data in and out of GPU chip.
const int amt_data_bytes = in_size_bytes + out_size_bytes;
double elapsed_time_s = 86400; // Reassigned to minimum run time.
{
// Prepare events used for timing.
//
cudaEvent_t gpu_start_ce, gpu_stop_ce;
CE(cudaEventCreate(&gpu_start_ce));
CE(cudaEventCreate(&gpu_stop_ce));
// Copy input array from CPU to GPU.
//
CE( cudaMemcpy
( app.d_in, app.h_in, in_size_bytes, cudaMemcpyHostToDevice ) );
// Copy App structure to GPU.
//
CE( cudaMemcpyToSymbol
( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) );
// Launch kernel multiple times and keep track of the best time.
printf("\nLaunching with %d blocks of up to %d threads for %d elts "
"and mask %#x.\n",
num_blocks, thd_per_block_goal, app.array_size, app.mask);
for ( int kernel = 0; kernel < info.num_kernels; kernel++ )
{
cudaFuncAttributes& cfa = info.ki[kernel].cfa;
const int wp_limit = cfa.maxThreadsPerBlock >> 5;
const int thd_limit = wp_limit << 5;
const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit);
const int wp_start = 4;
const int wp_stop = vary_warps ? wp_limit : wp_start;
const int wp_inc = 4;
for ( int wp_cnt = wp_start; wp_cnt <= wp_stop; wp_cnt += wp_inc )
{
const int thd_per_block =
vary_warps ? wp_cnt << 5 : thd_per_block_no_vary;
// Zero the output array.
//
CE( cudaMemset(app.d_out,0,out_size_bytes) );
// Measure execution time starting "now", which is after data
// set to GPU.
//
CE(cudaEventRecord(gpu_start_ce,0));
// Launch Kernel
//
info.ki[kernel].func_ptr <<< num_blocks, thd_per_block >>>();
//
// Confused?
//
// info.ki[kernel].func_ptr holds a pointer to the kernel
// routine.
// Stop measuring execution time now, which is before the data is
// returned from the GPU.
//
CE(cudaEventRecord(gpu_stop_ce,0));
CE(cudaEventSynchronize(gpu_stop_ce));
float cuda_time_ms = -1.1;
CE(cudaEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
const double this_elapsed_time_s = cuda_time_ms * 0.001;
const double thpt_data_gbps =
amt_data_bytes / this_elapsed_time_s * 1e-9;
if ( vary_warps )
{
const char* const stars = "********************************************************************************";
const int stars_len = 80;
const double bw_frac =
1e9 * thpt_data_gbps / info.chip_bw_Bps;
const int max_st_len = 52; // Maximum stars length.
// Number of warps, rounded up.
//
const int wps = ( thd_per_block + 31 ) >> 5;
/// Problem 2 Solution Goes Around Here
// The maximum number of active blocks per MP for this
// kernel when launched with a block size of thd_per_block.
//
const int max_bl_per_mp =
info.get_max_active_blocks_per_mp(kernel,thd_per_block);
/// Problem 2: Assign appropriate value.
/// SOLUTION - Problem 2
//
// Compute number of blocks available per MP based only on
// the number of blocks. This may be larger than the
// number of blocks that can run.
//
const int bl_per_mp_available =
0.999 + double(num_blocks) / num_mp;
// The number of active blocks is the minimum of what
// can fit and how many are available.
//
const int bl_per_mp =
min( bl_per_mp_available, max_bl_per_mp );
// Based on the number of blocks, compute the number of warps.
//
const int act_wps = wps * bl_per_mp;
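// Editor's note (worked example with made-up numbers): the 0.999 offset
// acts as a cheap ceiling. With num_blocks = 20 and num_mp = 16,
//   bl_per_mp_available = int( 0.999 + 20.0/16.0 ) = int( 2.249 ) = 2,
// and if the occupancy query reports max_bl_per_mp = 3, then
// bl_per_mp = min( 2, 3 ) = 2 and act_wps = 2 * wps.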
if ( wp_cnt == wp_start )
printf("Kernel %s:\n", info.ki[kernel].name);
printf("%2d wp %2d acwp %3.0f µs %#4x %5.0f GB/s %s\n",
(thd_per_block + 31 ) >> 5,
act_wps,
this_elapsed_time_s * 1e6,
app.mask,
thpt_data_gbps,
&stars[stars_len-int(bw_frac*max_st_len)]
);
} else {
printf("K %-15s %2d wp %11.3f µs %8.3f GB/s\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_data_gbps);
}
elapsed_time_s = min(this_elapsed_time_s,elapsed_time_s);
// Copy output array from GPU to CPU.
//
CE( cudaMemcpy
( app.h_out, app.d_out, out_size_bytes,
cudaMemcpyDeviceToHost) );
int err_count = 0;
Elt_Type* const out_check =
kernel == 0 ? app.h_out_check : app.h_out_sc_check;
const double tolerance = kernel == 0 ? 1e-5 : 1e-2;
for ( int i=0; i<app.array_size; i++ )
{
if ( out_check[i] == -2 ) continue; // Don't check.
/// SOLUTION -- Problem 3
//
// Skip correctness check if sine execution was
// suppressed by the mask.
//
if ( kernel == 1 && i & app.mask ) continue;
if ( fabs( out_check[i] - app.h_out[i] ) > tolerance )
{
err_count++;
if ( err_count < 5 )
printf("Error at elt %#x: %.7f != %.7f (correct)\n",
i, app.h_out[i], out_check[i] );
}
}
if ( err_count )
printf("Total errors %d\n", err_count);
}
}
}
}
|
18d036a25afd9b388eaeb890913dc4aaf91672ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cmath>
#include<cassert>
#include<cstdio>
#include<memory>
#include<helper_cuda.h>
//#include<cuda_runtime.h>
#include<hip/hip_vector_types.h>
using namespace std;
//#ifndef MAX
//#define MAX(a,b) (a>b ?a:b)
//#endif
// Defined in a .cpp file; the .cpp side also has to export them with C-style (extern "C") linkage.
extern "C" void
computeGold(std::unique_ptr<char[]>&ref, char *idata, unsigned int const len);
extern "C" void
computeGold2(std::unique_ptr<int2[]>&ref, int2 *idata, unsigned int const len);
__global__ void
kernel(int *g_data){
unsigned int const tid = threadIdx.x;
// The buffer holds chars, but here it is read as ints, i.e. four chars at a time.
int data = g_data[tid];
/*
use integer arithmetic to process all four bytes with one thread
this serializes the execution, but is the simplest solutions to avoid
bank conflicts for this very low number of threads
in general it is more efficient to process each byte by a separate thread,
to avoid bank conflicts the access pattern should be
g_data[4 * wtid + wid], where wtid is the **thread id within the half warp**
and **wid is the warp id**
see also the programming guide for a more in depth discussion.
*/
// We are not even reading from shared memory here, so what bank conflict is this about?
// Extract each 8-bit byte from left to right with (data<<x)>>24.
g_data[tid] = ( ((data<<0)>>24) - 10 )<<24 |
( ((data<<8)>>24) - 10 )<<16 |
( ((data<<16)>>24) - 10 )<<8 |
( ((data<<24)>>24) - 10 )<<0 ;
}
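// Editor's sketch (not called by runTest, added for illustration only):
// the one-thread-per-byte variant suggested in the comment above. Each of
// len threads handles a single char, so no packing/unpacking is needed.
__global__ void
kernel_per_byte(char *g_data){
unsigned int const tid = threadIdx.x;
g_data[tid] = g_data[tid] - 10;
}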
__global__ void
kernel2(int2 *g_data){
unsigned int const tid = threadIdx.x;
int2 data = g_data[tid];
// The original comment here just repeated the remarks from the kernel above; it looks copy-pasted.
g_data[tid].x = data.x - data.y;
}
extern "C" bool
runTest(int const argc, char const *argv[], char *data,
int2 *data_int2, unsigned int len){
// Check that the length is an exact multiple of 4.
assert(0 == (len%4) );
unsigned int const num_threads = len / 4;
unsigned int const mem_size = sizeof(char)*len;
unsigned int const mem_size_int2 = sizeof(int2)*len;
//device mem
//TODO: write a unique_ptr-like template to wrap hipMalloc.
char *d_data;
checkCudaErrors(hipMalloc((void**)&d_data,mem_size));
checkCudaErrors(hipMemcpy(d_data, data, mem_size, hipMemcpyHostToDevice));
//for int2 version
int2 *d_data_int2;
checkCudaErrors(hipMalloc((void**)&d_data_int2, mem_size_int2));
checkCudaErrors(hipMemcpy(d_data_int2, data_int2,
mem_size_int2,hipMemcpyHostToDevice));
dim3 grids(1,1,1);
dim3 blocks(num_threads,1,1);// int path: 4 chars per thread; 16/4
dim3 blocks2(len,1,1);// int2 path: one int2 (two ints) per thread; 16
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(blocks), 0, 0, reinterpret_cast<int *>(d_data));
hipLaunchKernelGGL(( kernel2), dim3(grids),dim3(blocks2), 0, 0, d_data_int2);
//TODO:
// Not sure why the line below reports an error.
//checkCudaErrors(getLastCudaError("kernel execution failed"));
//getLastCudaError("kernel execution failed");
checkCudaErrors(hipDeviceSynchronize());
unique_ptr<char[]> ref{new char[mem_size]};
computeGold(ref, data, len);
unique_ptr<int2[]> ref2{new int2[mem_size_int2]};
computeGold2(ref2, data_int2, len);
checkCudaErrors(hipMemcpy(data,d_data,mem_size,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(data_int2, d_data_int2,mem_size_int2,
hipMemcpyDeviceToHost));
bool flag = true;
for(unsigned int i=0; i<len; i++){
if(ref[i] != data[i] ||
ref2[i].x != data_int2[i].x ||
ref2[i].y != data_int2[i].y)
flag = false;
}
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipFree(d_data_int2));
return flag;
}
| 18d036a25afd9b388eaeb890913dc4aaf91672ea.cu | #include<cmath>
#include<cassert>
#include<cstdio>
#include<memory>
#include<helper_cuda.h>
//#include<cuda_runtime.h>
#include<vector_types.h>
using namespace std;
//#ifndef MAX
//#define MAX(a,b) (a>b ?a:b)
//#endif
// Defined in a .cpp file; the .cpp side also has to export them with C-style (extern "C") linkage.
extern "C" void
computeGold(std::unique_ptr<char[]>&ref, char *idata, unsigned int const len);
extern "C" void
computeGold2(std::unique_ptr<int2[]>&ref, int2 *idata, unsigned int const len);
__global__ void
kernel(int *g_data){
unsigned int const tid = threadIdx.x;
// The buffer holds chars, but here it is read as ints, i.e. four chars at a time.
int data = g_data[tid];
/*
use integer arithmetic to process all four bytes with one thread
this serializes the execution, but is the simplest solutions to avoid
bank conflicts for this very low number of threads
in general it is more efficient to process each byte by a separate thread,
to avoid bank conflicts the access pattern should be
g_data[4 * wtid + wid], where wtid is the **thread id within the half warp**
and **wid is the warp id**
see also the programming guide for a more in depth discussion.
*/
// We are not even reading from shared memory here, so what bank conflict is this about?
// Extract each 8-bit byte from left to right with (data<<x)>>24.
g_data[tid] = ( ((data<<0)>>24) - 10 )<<24 |
( ((data<<8)>>24) - 10 )<<16 |
( ((data<<16)>>24) - 10 )<<8 |
( ((data<<24)>>24) - 10 )<<0 ;
}
__global__ void
kernel2(int2 *g_data){
unsigned int const tid = threadIdx.x;
int2 data = g_data[tid];
// The original comment here just repeated the remarks from the kernel above; it looks copy-pasted.
g_data[tid].x = data.x - data.y;
}
extern "C" bool
runTest(int const argc, char const *argv[], char *data,
int2 *data_int2, unsigned int len){
// Check that the length is an exact multiple of 4.
assert(0 == (len%4) );
unsigned int const num_threads = len / 4;
unsigned int const mem_size = sizeof(char)*len;
unsigned int const mem_size_int2 = sizeof(int2)*len;
//device mem
//TODO: write a unique_ptr-like template to wrap cudaMalloc.
char *d_data;
checkCudaErrors(cudaMalloc((void**)&d_data,mem_size));
checkCudaErrors(cudaMemcpy(d_data, data, mem_size, cudaMemcpyHostToDevice));
//for int2 version
int2 *d_data_int2;
checkCudaErrors(cudaMalloc((void**)&d_data_int2, mem_size_int2));
checkCudaErrors(cudaMemcpy(d_data_int2, data_int2,
mem_size_int2,cudaMemcpyHostToDevice));
dim3 grids(1,1,1);
dim3 blocks(num_threads,1,1);// int path: 4 chars per thread; 16/4
dim3 blocks2(len,1,1);// int2 path: one int2 (two ints) per thread; 16
kernel<<<grids,blocks>>>(reinterpret_cast<int *>(d_data));
kernel2<<<grids,blocks2>>>(d_data_int2);
//TODO:
// Not sure why the line below reports an error.
//checkCudaErrors(getLastCudaError("kernel execution failed"));
//getLastCudaError("kernel execution failed");
checkCudaErrors(cudaDeviceSynchronize());
unique_ptr<char[]> ref{new char[mem_size]};
computeGold(ref, data, len);
unique_ptr<int2[]> ref2{new int2[mem_size_int2]};
computeGold2(ref2, data_int2, len);
checkCudaErrors(cudaMemcpy(data,d_data,mem_size,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(data_int2, d_data_int2,mem_size_int2,
cudaMemcpyDeviceToHost));
bool flag = true;
for(unsigned int i=0; i<len; i++){
if(ref[i] != data[i] ||
ref2[i].x != data_int2[i].x ||
ref2[i].y != data_int2[i].y)
flag = false;
}
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaFree(d_data_int2));
return flag;
}
|
d4f8945902633786059cc53b31d4a6cc4429e8dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
__global__ void blur ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) {
//auto i = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
//auto j = blockIdx.y * blockDim.y + threadIdx.y;
auto i = blockIdx.x * (blockDim.x) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y) + threadIdx.y;
if ( i > 0 && i < (cols - 1) && j > 0 && j < (rows - 1)) {
for (auto c = 0; c < 3; ++c){
auto gu = data[((j - 1) * cols + i - 1) * 3 + c] + data[((j - 1) * cols + i + 1) * 3 + c]
+ data[( j * cols + i - 1) * 3 + c] + data[( j * cols + i + 1) * 3 + c]
+ data[((j + 1) * cols + i - 1) * 3 + c] + data[((j + 1) * cols + i + 1) * 3 + c]
+ data[(( j - 1) * cols + i) * 3 + c] + data[( j * cols + i) * 3 + c]
+ data[(( j + 1) * cols + i) * 3 + c];
out[(j * cols + i) * 3 + c] = (gu / 9);
}
}
}
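// Editor's sketch (not used by the kernel above, illustration only): the
// flat index of channel c of pixel (i, j) in the interleaved RGB buffer of
// width cols -- the pattern repeated nine times inside blur().
__device__ inline std::size_t rgb_index( std::size_t i, std::size_t j,
std::size_t cols, int c ) {
return ( j * cols + i ) * 3 + c;
}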
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
std::size_t size = 3 * m_in.cols * m_in.rows;
// hipHostRegister(g.data(), size, hipHostRegisterDefault);
hipMalloc( &rgb_d, 3 * rows * cols);
hipMalloc( &out, 3 * rows * cols );
// Streams declaration.
hipStream_t streams[ 2 ];
// Creation.
hipStreamCreate( &streams[ 0 ] );
hipStreamCreate( &streams[ 1 ] );
hipMemcpyAsync( rgb_d, rgb, size/2, hipMemcpyHostToDevice, streams[ 0 ] );
hipMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, hipMemcpyHostToDevice, streams[ 1 ] );
dim3 t( 32, 32 );
dim3 be( 3 * (( cols ) / ((t.x - 2) + 1) ), (( rows ) / ((t.y - 2) + 1) ));
// dim3 t( 16, 16 );
// dim3 be( 3 * 2 * (( cols ) / ((t.x - 2) + 1) ), (2 * ( rows ) / ((t.y - 2) + 1) ));
// dim3 t( 4, 4 );
// dim3 be( 3 * 8 * (( cols ) / ((t.x - 2) + 1) ), (8 * ( rows ) / ((t.y - 2) + 1) ));
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
// One kernel is launched in each stream.
hipLaunchKernelGGL(( blur), dim3(be), dim3(t), 0, streams[ 0 ] , rgb_d, out, cols, rows / 2 + 2);
hipLaunchKernelGGL(( blur), dim3(be), dim3(t), 0, streams[ 1 ] , rgb_d+size/2, out+size/2, cols, rows / 2 + 2);
// Sending back the resulting vector by halves.
hipMemcpyAsync( g.data(), out, size/2, hipMemcpyDeviceToHost, streams[ 0 ] );
hipMemcpyAsync( g.data()+size/2, out+size/2, size/2, hipMemcpyDeviceToHost, streams[ 1 ] );
// Synchronize everything.
hipDeviceSynchronize();
// Destroy streams.
hipStreamDestroy(streams[0]);
hipStreamDestroy(streams[1]);
auto hipError_t = hipGetLastError();
// If no error was detected in any of this, we get hipSuccess
if (hipError_t != hipSuccess){
std::cout << hipGetErrorName(hipError_t) << std::endl;
std::cout << hipGetErrorString(hipError_t) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outBlur.jpg", m_out );
hipFree( rgb_d);
//hipFree( g_d);
hipFree ( out);
return 0;
}
| d4f8945902633786059cc53b31d4a6cc4429e8dc.cu | #include <opencv2/opencv.hpp>
#include <vector>
__global__ void blur ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) {
//auto i = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
//auto j = blockIdx.y * blockDim.y + threadIdx.y;
auto i = blockIdx.x * (blockDim.x) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y) + threadIdx.y;
if ( i > 0 && i < (cols - 1) && j > 0 && j < (rows - 1)) {
for (auto c = 0; c < 3; ++c){
auto gu = data[((j - 1) * cols + i - 1) * 3 + c] + data[((j - 1) * cols + i + 1) * 3 + c]
+ data[( j * cols + i - 1) * 3 + c] + data[( j * cols + i + 1) * 3 + c]
+ data[((j + 1) * cols + i - 1) * 3 + c] + data[((j + 1) * cols + i + 1) * 3 + c]
+ data[(( j - 1) * cols + i) * 3 + c] + data[( j * cols + i) * 3 + c]
+ data[(( j + 1) * cols + i) * 3 + c];
out[(j * cols + i) * 3 + c] = (gu / 9);
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
std::size_t size = 3 * m_in.cols * m_in.rows;
// cudaHostRegister(g.data(), size, cudaHostRegisterDefault);
cudaMalloc( &rgb_d, 3 * rows * cols);
cudaMalloc( &out, 3 * rows * cols );
// Streams declaration.
cudaStream_t streams[ 2 ];
// Creation.
cudaStreamCreate( &streams[ 0 ] );
cudaStreamCreate( &streams[ 1 ] );
cudaMemcpyAsync( rgb_d, rgb, size/2, cudaMemcpyHostToDevice, streams[ 0 ] );
cudaMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, cudaMemcpyHostToDevice, streams[ 1 ] );
dim3 t( 32, 32 );
dim3 be( 3 * (( cols ) / ((t.x - 2) + 1) ), (( rows ) / ((t.y - 2) + 1) ));
// dim3 t( 16, 16 );
// dim3 be( 3 * 2 * (( cols ) / ((t.x - 2) + 1) ), (2 * ( rows ) / ((t.y - 2) + 1) ));
// dim3 t( 4, 4 );
// dim3 be( 3 * 8 * (( cols ) / ((t.x - 2) + 1) ), (8 * ( rows ) / ((t.y - 2) + 1) ));
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
// One kernel is launched in each stream.
blur<<< be, t, 0, streams[ 0 ] >>>( rgb_d, out, cols, rows / 2 + 2);
blur<<< be, t, 0, streams[ 1 ] >>>( rgb_d+size/2, out+size/2, cols, rows / 2 + 2);
// Sending back the resulting vector by halves.
cudaMemcpyAsync( g.data(), out, size/2, cudaMemcpyDeviceToHost, streams[ 0 ] );
cudaMemcpyAsync( g.data()+size/2, out+size/2, size/2, cudaMemcpyDeviceToHost, streams[ 1 ] );
// Synchronize everything.
cudaDeviceSynchronize();
// Destroy streams.
cudaStreamDestroy(streams[0]);
cudaStreamDestroy(streams[1]);
auto cudaError = cudaGetLastError();
// If no error was detected in any of this, we get cudaSuccess
if (cudaError != cudaSuccess){
std::cout << cudaGetErrorName(cudaError) << std::endl;
std::cout << cudaGetErrorString(cudaError) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outBlur.jpg", m_out );
cudaFree( rgb_d);
//cudaFree( g_d);
cudaFree ( out);
return 0;
}
|
53506c9293d501d2b786b7a66aa6edf849ab742f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (33*1024)
__global__ void add(int *a, int *b, int *c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
c[tid] = a[tid] + b[tid]; // add as long as tid is inside the input vectors.
tid += gridDim.x * blockDim.x; // stride by the total number of threads (No. of blocks * threads per block).
}
}
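// Editor's note (worked example): main() below launches 128 blocks of 256
// threads, i.e. 32768 threads, while N = 33*1024 = 33792. The stride
// gridDim.x * blockDim.x is therefore 32768, so threads 0..1023 handle two
// elements each and the remaining threads handle one element each.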
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
int stat;
int errorSumCount;
// Start allocating memory for 3 vectors in GPU.
stat = hipMalloc((void**)&dev_a, N * sizeof(int));
stat = hipMalloc((void**)&dev_b, N * sizeof(int));
stat = hipMalloc((void**)&dev_c, N * sizeof(int));
// Construct vectors values for a and b vector.
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i*i;
}
// Copy the summing vectors to device.
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add) , dim3(128), dim3(256), 0, 0, dev_a, dev_b, dev_c);
// Copy the summed vector back to host.
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
// Print the vector now.
errorSumCount = 0;
for (int i = 0; i < N; i++) {
printf("\n%d: %d + %d = %d", i, a[i], b[i], c[i]);
if (a[i] + b[i] != c[i])
errorSumCount++;
}
printf("\nTotal iterations: %d", N);
printf("\nTotal sum error: %d", errorSumCount);
printf("\nTotal successful sums: %d", N - errorSumCount);
// Release device memory.
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
getchar();
return 0;
}
| 53506c9293d501d2b786b7a66aa6edf849ab742f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (33*1024)
__global__ void add(int *a, int *b, int *c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
c[tid] = a[tid] + b[tid]; // add as long as tid is inside the input vectors.
tid += gridDim.x * blockDim.x; // stride by the total number of threads (No. of blocks * threads per block).
}
}
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
int stat;
int errorSumCount;
// Start allocating memory for 3 vectors in GPU.
stat = cudaMalloc((void**)&dev_a, N * sizeof(int));
stat = cudaMalloc((void**)&dev_b, N * sizeof(int));
stat = cudaMalloc((void**)&dev_c, N * sizeof(int));
// Construct vectors values for a and b vector.
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i*i;
}
// Copy the summing vectors to device.
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add <<<128, 256>>> (dev_a, dev_b, dev_c);
// Copy the summed vector back to host.
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
// Print the vector now.
errorSumCount = 0;
for (int i = 0; i < N; i++) {
printf("\n%d: %d + %d = %d", i, a[i], b[i], c[i]);
if (a[i] + b[i] != c[i])
errorSumCount++;
}
printf("\nTotal iterations: %d", N);
printf("\nTotal sum error: %d", errorSumCount);
printf("\nTotal successful sums: %d", N - errorSumCount);
// Release device memory.
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
getchar();
return 0;
}
|
a66c8aad998ea630b7b3c1d973f2a4b8c392eac2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#include "NBody.h"
#include "NBodyVisualiser.h"
#define USER_NAME "aca15a" //username
#define THREADS_PER_BLOCK 256
#define SQRT_THREADS_PER_BLOCK sqrt(THREADS_PER_BLOCK)
//FUNCTION DEFINITIONS
void checkCUDAErrors(const char*);
void print_help();
//step function for OMP mode
__global__ void kernelStep_SOA(nbody_soa* d_body, float* g, float* Fx, float*Fy, unsigned int d_D, unsigned int d_N);
__global__ void grid_SOA(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
__global__ void kernelStep_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
__global__ void grid_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
__global__ void kernelStep_SOA_Interactions(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
void CUDAstep();
float* Fx;
float* Fy;
//GLOBAL VARIABLES
//create pointer to nbody structure to store bodies
struct nbody* body;
nbody_soa* d_body;
__device__ unsigned int d_N;
__device__ unsigned int d_D;
//__constant__ unsigned int d_N;
//__constant__ unsigned int d_D;
//nbody* d_body;
float* grid;
float* d_grid;
//initialise N D I as global to be used by step()
unsigned int N;
unsigned int D;
static unsigned int I;
//1D heatmap grid
//Define grid as a global variable
//MAIN//
int main(int argc, char* argv[]) {
//use time to create a random seed
srand((unsigned int)time(NULL)); // randomness initialization
if (argc < 4) {
exit(1);
}
//Checks if N is an int and also if it is positive
//atoi will return 0 if it is not an int
if (atoi(argv[1]) <= 0) {
printf("The value for N is not valid, please enter d_body positive integer \n");
exit(1);
}
else {
N = atoi(argv[1]);
}
//assign memory needed for number of bodies
body = (struct nbody*)malloc(sizeof(struct nbody) * N);
if (body == NULL) {
fprintf(stderr, "malloc failed\n");
return -1;
}
printf("N = %d \n", N);
//Checks if D is an int and also if it is positive
if (atoi(argv[2]) <= 0) {
printf("The value for D is not valid, please enter d_body positive integer \n");
exit(1);
}
else {
D = atoi(argv[2]);
}
//assign memory needed for dimensions of 1D grid
grid = (float*)malloc(sizeof(float) * D * D);
printf("D = %d \n", D);
enum MODE mode;
char* str;
str = argv[3];
if (strcmp(str, "CPU") == 0) {
mode = CPU;
}
else if (strcmp(str, "OPENMP") == 0) {
mode = OPENMP;
}
else if (strcmp(str, "CUDA") == 0) {
mode = CUDA;
}
else {
printf("The value for M is not valid, please enter CPU or OPENMP or CUDA \n");
exit(1);
}
printf("Mode = %s \n", str);
I = NULL;
unsigned int n = 0;
//check for the optional arguments
for (int i = 4; i < argc; i += 2) {
str = argv[i];
if (strcmp(str, "-i") == 0) {
I = atoi(argv[i + 1]);
printf("I = %d \n", I);
}
if (strcmp(str, "-f") == 0) {
FILE* f = NULL;
str = argv[i + 1];
f = fopen(str, "r");
if (f == NULL) {
fprintf(stderr, "Error: Could not find file \n");
exit(1);
}
char buffer[200];
//read single body data
//run loop until end of input or nbodys
while (!feof(f) && !(N == n)) {
//get the input using fgets
fgets(buffer, 200, f);
//if line starts with # ignore
if (buffer[0] == '#' || isspace(buffer[0])) {
continue;
}
//printf("\n %s",buffer);
//read 5 floats and skip "," and whitespace characters with %*c
sscanf(buffer, "%f %*c%*c %f %*c%*c %f %*c%*c %f %*c%*c %f", &body[n].x, &body[n].y, &body[n].vx, &body[n].vy, &body[n].m);
if (body[n].x < 0) {
body[n].x = ((float)rand()) / RAND_MAX; // returns a random value between 0 and 1 for any invalid input
}
if (body[n].y < 0) {
body[n].y = ((float)rand()) / RAND_MAX; // returns a random value between 0 and 1 for any invalid input
}
if (body[n].vx < 0) {
body[n].vx = 0.0f; // returns 0 for vx for any invalid input
}
if (body[n].vy < 0) {
body[n].vy = 0.0f; // returns 0 for vy for any invalid input
}
if (body[n].m <= 0) { //mass can not be 0
body[n].m = (float)1 / N; // returns m as 1/n for any invalid input
}
//printf("\n body %d: x=%f, y=%f, vx=%f, vy=%f, m=%f", n, body[n].x, body[n].y, body[n].vx, body[n].vy, body[n].m);
n++;
}
fclose(f);
}
}
//if more bodies are needed
for (unsigned int i = n; i < N; i++) {
body[i].x = ((float)rand()) / RAND_MAX; // Returns a random value between 0 and 1
body[i].y = ((float)rand()) / RAND_MAX; // Returns a random value between 0 and 1
body[i].vx = 0.0f; // Returns 0 for vx
body[i].vy = 0.0f; // Returns 0 for vy
body[i].m = (float)1 / N; // Returns m as 1/N
//printf("\n body %d: x=%f, y=%f, vx=%f, vy=%f, m=%f", i, body[i].x, body[i].y, body[i].vx, body[i].vy, body[i].m);
}
if (mode == CUDA) {
//Size of each float array
unsigned int size = N * sizeof(float);
//CPU soa
nbody_soa* n_body = (nbody_soa*)malloc(sizeof(nbody_soa));
//allocate memory for CPU soa
n_body->x = (float*)malloc(size);
n_body->y = (float*)malloc(size);
n_body->vx = (float*)malloc(size);
n_body->vy = (float*)malloc(size);
n_body->m = (float*)malloc(size);
//populate soa
for (int i = 0; i < N; i++) {
n_body->x[i] = body[i].x;
n_body->y[i] = body[i].y;
n_body->vx[i] = body[i].vx;
n_body->vy[i] = body[i].vy;
n_body->m[i] = body[i].m;
}
//Intermediate soa
nbody_soa* h_body = (nbody_soa*)malloc(sizeof(nbody_soa));
//Allocate memory on GPU for intermediate float pointers and copy the information
hipMalloc((void**)&h_body->x, size);
hipMemcpy(h_body->x, n_body->x, size, hipMemcpyHostToDevice);
hipMalloc((void**)&h_body->y, size);
hipMemcpy(h_body->y, n_body->y, size, hipMemcpyHostToDevice);
hipMalloc((void**)&h_body->vx, size);
hipMemcpy(h_body->vx, n_body->vx, size, hipMemcpyHostToDevice);
hipMalloc((void**)&h_body->vy, size);
hipMemcpy(h_body->vy, n_body->vy, size, hipMemcpyHostToDevice);
hipMalloc((void**)&h_body->m, size);
hipMemcpy(h_body->m, n_body->m, size, hipMemcpyHostToDevice);
//Allocate memory on GPU for device soa and copy the information
hipMalloc(&(d_body), sizeof(nbody_soa));
hipMemcpy(d_body, h_body, sizeof(nbody_soa), hipMemcpyHostToDevice);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*NBODY AOS*/
//hipMalloc((void**)&(d_body), N * sizeof(nbody));
//hipMemcpy(d_body, body, N * sizeof(nbody), hipMemcpyHostToDevice);
hipMalloc((void**)&(d_grid), sizeof(grid));
hipMemcpy(d_grid, grid, sizeof(grid), hipMemcpyHostToDevice);
hipMalloc((void**)&Fx, size);
hipMalloc((void**)&Fy, size);
d_D = D;
d_N = N;
//hipMemcpyToSymbol(d_D, &D, sizeof(unsigned int));
//hipMemcpyToSymbol(d_N, &N, sizeof(unsigned int));
hipEvent_t start, stop;
float milliseconds = 0;
int errors;
//checkCUDAErrors("CUDA malloc");
//checkCUDAErrors("CUDA memcpy");
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
if (I == NULL) {
setActivityMapData(d_grid);
initViewer(N, D, mode, &CUDAstep);
setActivityMapData(d_grid);
//setNBodyPositions(d_body);
setNBodyPositions2f(h_body->x, h_body->y);
startVisualisationLoop();
}
else {
for (unsigned int k = 1; k < I; k++) {
CUDAstep();
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//stop timer
int seconds = milliseconds / 1000;
int mils = (int)(milliseconds - (seconds * 1000));
printf("Execution time is %d sec %d ms\n", seconds, mils);
// Cleanup
hipFree(h_body->x);
hipFree(h_body->y);
hipFree(h_body->vx);
hipFree(h_body->vy);
hipFree(h_body->m);
free(h_body);
free(n_body);
}
// Cleanup
hipFree(d_body);
hipFree(d_grid);
hipFree(Fx);
hipFree(Fy);
checkCUDAErrors("CUDA cleanup");
//free any allocated memory
free(body);
free(grid);
hipDeviceReset();
return 0;
}
void CUDAstep(void) {
dim3 blocksPerGrid(N/32, 1);
dim3 threadsPerBlock(32, 1);
hipLaunchKernelGGL(( kernelStep_SOA) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_body, d_grid,Fx, Fy, d_D, d_N);
//kernelStep_SOA_Interactions << <blocksPerGrid, threadsPerBlock >> > (d_body, d_grid, Fx, Fy, d_D, d_N);
hipDeviceSynchronize();
grid_SOA << < blocksPerGrid, threadsPerBlock >> > (d_body, d_grid,Fx,Fy,d_D,d_N);
//kernelStep_AOS << <blocksPerGrid, threadsPerBlock >> > (d_body, d_grid, d_D, d_N);
//grid_AOS << < blocksPerGrid, threadsPerBlock >> > (d_body, d_grid);
//hipDeviceSynchronize();
}
__global__ void kernelStep_SOA_Interactions(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i != j && i<d_N && j<d_N){
Fx[i] = 0.0f;
Fy[i] = 0.0f;
float xDiff = d_body->x[j] - d_body->x[i];
float yDiff = d_body->y[j] - d_body->y[i];
float denominator = powf(xDiff * xDiff + yDiff * yDiff + SOFTENING * SOFTENING, 1.5f);
Fx[i] += d_body->m[j] * xDiff / denominator;
Fy[i] += d_body->m[j] * yDiff / denominator;
Fx[i] *= G * d_body->m[i];
Fy[i] *= G * d_body->m[i];
}
if (i < d_D * d_D) {
g[i] = 0;
}
}
__global__ void kernelStep_SOA(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<d_N) {
Fx[i] = 0.0f;
Fy[i] = 0.0f;
//#pragma unroll
for (int j = 0; j < d_N; j++) {
if (i == j) continue;
float xDiff = d_body->x[j] - d_body->x[i];
float yDiff = d_body->y[j] - d_body->y[i];
float denominator = powf(xDiff * xDiff + yDiff * yDiff + SOFTENING * SOFTENING, 1.5f);
Fx[i] += d_body->m[j] * xDiff / denominator;
Fy[i] += d_body->m[j] * yDiff / denominator;
}
Fx[i] *= G * d_body->m[i];
Fy[i] *= G * d_body->m[i];
}
if (i < d_D * d_D) {
g[i] = 0;
}
}
__global__ void grid_SOA(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
//get x,y positions on the activity heatmap
if (i < d_N) {
//update velocities
d_body->vx[i] += Fx[i] / d_body->m[i] * dt;
d_body->vy[i] += Fy[i] / d_body->m[i] * dt;
//update positions
d_body->x[i] += d_body->vx[i] * dt;
if (d_body->x[i] > 1) d_body->x[i] = 1; //clamp to 1
d_body->y[i] += d_body->vy[i] * dt;
if (d_body->y[i] > 1) d_body->y[i] = 1;
//get grid positions
int pos, posx, posy;
posx = (int)floor((d_body->x[i]) / ((float)1 / d_D));
posy = (int)floor((d_body->y[i]) / ((float)1 / d_D));
if ((posx <= d_D * d_D) && (posx >= 0) && (posy <= d_D * d_D) && (posy >= 0)) {
//convert from 2D positions to 1D
pos = posy + (d_D * posx);
//local atomic add to the grid
atomicAdd(&g[pos], (1.0/(d_N*0.3)));
}
}
}
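// Editor's note (worked example with made-up numbers): with D = 16, a body
// at (x, y) = (0.53, 0.25) lands in column posx = floor(0.53 / (1/16)) = 8
// and row posy = floor(0.25 / (1/16)) = 4, so it increments entry
// pos = posy + D * posx = 4 + 16 * 8 = 132 of the flattened D*D grid.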
__global__ void kernelStep_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < d_N) {
Fx[i] = 0.0f;
Fy[i] = 0.0f;
//#pragma unroll
for (int j = 0; j < d_N; j++) {
float xDiff = d_body[j].x - d_body[i].x;
float yDiff = d_body[j].y - d_body[i].y;
float denominator = powf(xDiff * xDiff + yDiff * yDiff + SOFTENING * SOFTENING, 1.5f);
Fx[i] += d_body[j].m * xDiff / denominator;
Fy[i] += d_body[j].m * yDiff / denominator;
}
Fx[i] *= G * d_body[i].m;
Fy[i] *= G * d_body[i].m;
}
if (i < d_D * d_D) {
g[i] = 0;
}
}
__global__ void grid_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
d_body[i].vx += (Fx[i] / d_body[i].m) * dt;
d_body[i].vy += (Fy[i] / d_body[i].m) * dt;
d_body[i].x += d_body[i].vx * dt;
d_body[i].y += d_body[i].vy * dt;
//get x,y positions on the activity heatmap
int pos, posx, posy;
posx = (int)floor((d_body[i].x) / ((float)1 / d_D));
posy = (int)floor((d_body[i].y) / ((float)1 / d_D));
if ((posx <= d_D * d_D) && (posx >= 0) && (posy <= d_D * d_D) && (posy >= 0)) {
//convert from 2D positions to 1D
pos = posy + (d_D * posx);
//local atomic add to the grid
atomicAdd(&g[pos], (1.0 / (d_N * 0.3)));
}
}
void checkCUDAErrors(const char* msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void print_help() {
printf("nbody_%s N D M [-i I] [-i input_file]\n", USER_NAME);
printf("where:\n");
printf("\tN Is the number of bodies to simulate.\n");
printf("\tD Is the integer dimension of the activity grid. The Grid has D*D locations.\n");
printf("\tM Is the operation mode, either 'CPU' or 'OPENMP'\n");
printf("\t[-i I] Optionally specifies the number of simulation iterations 'I' to perform. Specifying no value will use visualisation mode. \n");
printf("\t[-f input_file] Optionally specifies an input file with an initial N bodies of data. If not specified random data will be created.\n");
} | a66c8aad998ea630b7b3c1d973f2a4b8c392eac2.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#include "NBody.h"
#include "NBodyVisualiser.h"
#define USER_NAME "aca15a" //username
#define THREADS_PER_BLOCK 256
#define SQRT_THREADS_PER_BLOCK sqrt(THREADS_PER_BLOCK)
//FUNCTION DEFINITIONS
void checkCUDAErrors(const char*);
void print_help();
//step function for OMP mode
__global__ void kernelStep_SOA(nbody_soa* d_body, float* g, float* Fx, float*Fy, unsigned int d_D, unsigned int d_N);
__global__ void grid_SOA(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
__global__ void kernelStep_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
__global__ void grid_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
__global__ void kernelStep_SOA_Interactions(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N);
void CUDAstep();
float* Fx;
float* Fy;
//GLOBAL VARIABLES
//create pointer to nbody structure to store bodies
struct nbody* body;
nbody_soa* d_body;
__device__ unsigned int d_N;
__device__ unsigned int d_D;
//__constant__ unsigned int d_N;
//__constant__ unsigned int d_D;
//nbody* d_body;
float* grid;
float* d_grid;
//initialise N D I as global to be used by step()
unsigned int N;
unsigned int D;
static unsigned int I;
//1D heatmap grid
//Define grid as a global variable
//MAIN//
int main(int argc, char* argv[]) {
//use time to create a random seed
srand((unsigned int)time(NULL)); // randomness initialization
if (argc < 4) {
exit(1);
}
//Checks if N is an int and also if it is positive
//atoi will return 0 if it is not an int
if (atoi(argv[1]) <= 0) {
printf("The value for N is not valid, please enter d_body positive integer \n");
exit(1);
}
else {
N = atoi(argv[1]);
}
//assign memory needed for number of bodies
body = (struct nbody*)malloc(sizeof(struct nbody) * N);
if (body == NULL) {
fprintf(stderr, "malloc failed\n");
return -1;
}
printf("N = %d \n", N);
//Checks if D is an int and also if it is positive
if (atoi(argv[2]) <= 0) {
printf("The value for D is not valid, please enter d_body positive integer \n");
exit(1);
}
else {
D = atoi(argv[2]);
}
//assign memory needed for dimensions of 1D grid
grid = (float*)malloc(sizeof(float) * D * D);
printf("D = %d \n", D);
enum MODE mode;
char* str;
str = argv[3];
if (strcmp(str, "CPU") == 0) {
mode = CPU;
}
else if (strcmp(str, "OPENMP") == 0) {
mode = OPENMP;
}
else if (strcmp(str, "CUDA") == 0) {
mode = CUDA;
}
else {
printf("The value for M is not valid, please enter CPU or OPENMP or CUDA \n");
exit(1);
}
printf("Mode = %s \n", str);
I = NULL;
unsigned int n = 0;
//check for the optional arguments
for (int i = 4; i < argc; i += 2) {
str = argv[i];
if (strcmp(str, "-i") == 0) {
I = atoi(argv[i + 1]);
printf("I = %d \n", I);
}
if (strcmp(str, "-f") == 0) {
FILE* f = NULL;
str = argv[i + 1];
f = fopen(str, "r");
if (f == NULL) {
fprintf(stderr, "Error: Could not find file \n");
exit(1);
}
char buffer[200];
//read single body data
//run loop until end of input or nbodys
while (!feof(f) && !(N == n)) {
//get the input using fgets
fgets(buffer, 200, f);
//if line starts with # ignore
if (buffer[0] == '#' || isspace(buffer[0])) {
continue;
}
//printf("\n %s",buffer);
//read 5 floats and skip "," and whitespace characters with %*c
sscanf(buffer, "%f %*c%*c %f %*c%*c %f %*c%*c %f %*c%*c %f", &body[n].x, &body[n].y, &body[n].vx, &body[n].vy, &body[n].m);
if (body[n].x < 0) {
body[n].x = ((float)rand()) / RAND_MAX; // returns a random value between 0 and 1 for any invalid input
}
if (body[n].y < 0) {
body[n].y = ((float)rand()) / RAND_MAX; // returns a random value between 0 and 1 for any invalid input
}
if (body[n].vx < 0) {
body[n].vx = 0.0f; // returns 0 for vx for any invalid input
}
if (body[n].vy < 0) {
body[n].vy = 0.0f; // returns 0 for vy for any invalid input
}
if (body[n].m <= 0) { //mass can not be 0
body[n].m = (float)1 / N; // returns m as 1/n for any invalid input
}
//printf("\n body %d: x=%f, y=%f, vx=%f, vy=%f, m=%f", n, body[n].x, body[n].y, body[n].vx, body[n].vy, body[n].m);
n++;
}
fclose(f);
}
}
//if more bodies are needed
for (unsigned int i = n; i < N; i++) {
body[i].x = ((float)rand()) / RAND_MAX; // Returns a random value between 0 and 1
body[i].y = ((float)rand()) / RAND_MAX; // Returns a random value between 0 and 1
body[i].vx = 0.0f; // Returns 0 for vx
body[i].vy = 0.0f; // Returns 0 for vy
body[i].m = (float)1 / N; // Returns m as 1/N
//printf("\n body %d: x=%f, y=%f, vx=%f, vy=%f, m=%f", i, body[i].x, body[i].y, body[i].vx, body[i].vy, body[i].m);
}
if (mode == CUDA) {
//Size of each float array
unsigned int size = N * sizeof(float);
//CPU soa
nbody_soa* n_body = (nbody_soa*)malloc(sizeof(nbody_soa));
//allocate memory for CPU soa
n_body->x = (float*)malloc(size);
n_body->y = (float*)malloc(size);
n_body->vx = (float*)malloc(size);
n_body->vy = (float*)malloc(size);
n_body->m = (float*)malloc(size);
//populate soa
for (int i = 0; i < N; i++) {
n_body->x[i] = body[i].x;
n_body->y[i] = body[i].y;
n_body->vx[i] = body[i].vx;
n_body->vy[i] = body[i].vy;
n_body->m[i] = body[i].m;
}
//Intermediate soa
nbody_soa* h_body = (nbody_soa*)malloc(sizeof(nbody_soa));
//Allocate memory on GPU for intermediate float pointers and copy the information
cudaMalloc((void**)&h_body->x, size);
cudaMemcpy(h_body->x, n_body->x, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&h_body->y, size);
cudaMemcpy(h_body->y, n_body->y, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&h_body->vx, size);
cudaMemcpy(h_body->vx, n_body->vx, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&h_body->vy, size);
cudaMemcpy(h_body->vy, n_body->vy, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&h_body->m, size);
cudaMemcpy(h_body->m, n_body->m, size, cudaMemcpyHostToDevice);
//Allocate memory on GPU for device soa and copy the information
cudaMalloc(&(d_body), sizeof(nbody_soa));
cudaMemcpy(d_body, h_body, sizeof(nbody_soa), cudaMemcpyHostToDevice);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*NBODY AOS*/
//cudaMalloc((void**)&(d_body), N * sizeof(nbody));
//cudaMemcpy(d_body, body, N * sizeof(nbody), cudaMemcpyHostToDevice);
cudaMalloc((void**)&(d_grid), sizeof(grid));
cudaMemcpy(d_grid, grid, sizeof(grid), cudaMemcpyHostToDevice);
cudaMalloc((void**)&Fx, size);
cudaMalloc((void**)&Fy, size);
d_D = D;
d_N = N;
//cudaMemcpyToSymbol(d_D, &D, sizeof(unsigned int));
//cudaMemcpyToSymbol(d_N, &N, sizeof(unsigned int));
cudaEvent_t start, stop;
float milliseconds = 0;
int errors;
//checkCUDAErrors("CUDA malloc");
//checkCUDAErrors("CUDA memcpy");
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
if (I == NULL) {
setActivityMapData(d_grid);
initViewer(N, D, mode, &CUDAstep);
setActivityMapData(d_grid);
//setNBodyPositions(d_body);
setNBodyPositions2f(h_body->x, h_body->y);
startVisualisationLoop();
}
else {
for (unsigned int k = 1; k < I; k++) {
CUDAstep();
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//stop timer
int seconds = milliseconds / 1000;
int mils = (int)(milliseconds - (seconds * 1000));
printf("Execution time is %d sec %d ms\n", seconds, mils);
// Cleanup
cudaFree(h_body->x);
cudaFree(h_body->y);
cudaFree(h_body->vx);
cudaFree(h_body->vy);
cudaFree(h_body->m);
free(h_body);
free(n_body);
}
// Cleanup
cudaFree(d_body);
cudaFree(d_grid);
cudaFree(Fx);
cudaFree(Fy);
checkCUDAErrors("CUDA cleanup");
//free any allocated memory
free(body);
free(grid);
cudaDeviceReset();
return 0;
}
void CUDAstep(void) {
dim3 blocksPerGrid(N/32, 1);
dim3 threadsPerBlock(32, 1);
kernelStep_SOA <<<blocksPerGrid, threadsPerBlock >>> (d_body, d_grid,Fx, Fy, d_D, d_N);
//kernelStep_SOA_Interactions << <blocksPerGrid, threadsPerBlock >> > (d_body, d_grid, Fx, Fy, d_D, d_N);
cudaDeviceSynchronize();
grid_SOA << < blocksPerGrid, threadsPerBlock >> > (d_body, d_grid,Fx,Fy,d_D,d_N);
//kernelStep_AOS << <blocksPerGrid, threadsPerBlock >> > (d_body, d_grid, d_D, d_N);
//grid_AOS << < blocksPerGrid, threadsPerBlock >> > (d_body, d_grid);
//cudaDeviceSynchronize();
}
__global__ void kernelStep_SOA_Interactions(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i != j && i<d_N && j<d_N){
Fx[i] = 0.0f;
Fy[i] = 0.0f;
float xDiff = d_body->x[j] - d_body->x[i];
float yDiff = d_body->y[j] - d_body->y[i];
float denominator = powf(xDiff * xDiff + yDiff * yDiff + SOFTENING * SOFTENING, 1.5f);
Fx[i] += d_body->m[j] * xDiff / denominator;
Fy[i] += d_body->m[j] * yDiff / denominator;
Fx[i] *= G * d_body->m[i];
Fy[i] *= G * d_body->m[i];
}
if (i < d_D * d_D) {
g[i] = 0;
}
}
__global__ void kernelStep_SOA(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<d_N) {
Fx[i] = 0.0f;
Fy[i] = 0.0f;
//#pragma unroll
for (int j = 0; j < d_N; j++) {
if (i == j) continue;
float xDiff = d_body->x[j] - d_body->x[i];
float yDiff = d_body->y[j] - d_body->y[i];
float denominator = powf(xDiff * xDiff + yDiff * yDiff + SOFTENING * SOFTENING, 1.5f);
Fx[i] += d_body->m[j] * xDiff / denominator;
Fy[i] += d_body->m[j] * yDiff / denominator;
}
Fx[i] *= G * d_body->m[i];
Fy[i] *= G * d_body->m[i];
}
if (i < d_D * d_D) {
g[i] = 0;
}
}
__global__ void grid_SOA(nbody_soa* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
//get x,y positions on the activity heatmap
if (i < d_N) {
//update velocities
d_body->vx[i] += Fx[i] / d_body->m[i] * dt;
d_body->vy[i] += Fy[i] / d_body->m[i] * dt;
//update positions
d_body->x[i] += d_body->vx[i] * dt;
if (d_body->x[i] > 1) d_body->x[i] = 1; //clamp to 1
d_body->y[i] += d_body->vy[i] * dt;
if (d_body->y[i] > 1) d_body->y[i] = 1;
//get grid positions
int pos, posx, posy;
posx = (int)floor((d_body->x[i]) / ((float)1 / d_D));
posy = (int)floor((d_body->y[i]) / ((float)1 / d_D));
if ((posx <= d_D * d_D) && (posx >= 0) && (posy <= d_D * d_D) && (posy >= 0)) {
//convert from 2D positions to 1D
pos = posy + (d_D * posx);
//local atomic add to the grid
atomicAdd(&g[pos], (1.0/(d_N*0.3)));
}
}
}
__global__ void kernelStep_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < d_N) {
Fx[i] = 0.0f;
Fy[i] = 0.0f;
//#pragma unroll
for (int j = 0; j < d_N; j++) {
float xDiff = d_body[j].x - d_body[i].x;
float yDiff = d_body[j].y - d_body[i].y;
float denominator = powf(xDiff * xDiff + yDiff * yDiff + SOFTENING * SOFTENING, 1.5f);
Fx[i] += d_body[j].m * xDiff / denominator;
Fy[i] += d_body[j].m * yDiff / denominator;
}
Fx[i] *= G * d_body[i].m;
Fy[i] *= G * d_body[i].m;
}
if (i < d_D * d_D) {
g[i] = 0;
}
}
__global__ void grid_AOS(nbody* d_body, float* g, float* Fx, float* Fy, unsigned int d_D, unsigned int d_N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
d_body[i].vx += (Fx[i] / d_body[i].m) * dt;
d_body[i].vy += (Fy[i] / d_body[i].m) * dt;
d_body[i].x += d_body[i].vx * dt;
d_body[i].y += d_body[i].vy * dt;
//get x,y positions on the activity heatmap
int pos, posx, posy;
posx = (int)floor((d_body[i].x) / ((float)1 / d_D));
posy = (int)floor((d_body[i].y) / ((float)1 / d_D));
if ((posx <= d_D * d_D) && (posx >= 0) && (posy <= d_D * d_D) && (posy >= 0)) {
//convert from 2D positions to 1D
pos = posy + (d_D * posx);
//local atomic add to the grid
atomicAdd(&g[pos], (1.0 / (d_N * 0.3)));
}
}
void checkCUDAErrors(const char* msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void print_help() {
printf("nbody_%s N D M [-i I] [-i input_file]\n", USER_NAME);
printf("where:\n");
printf("\tN Is the number of bodies to simulate.\n");
printf("\tD Is the integer dimension of the activity grid. The Grid has D*D locations.\n");
printf("\tM Is the operation mode, either 'CPU' or 'OPENMP'\n");
printf("\t[-i I] Optionally specifies the number of simulation iterations 'I' to perform. Specifying no value will use visualisation mode. \n");
printf("\t[-f input_file] Optionally specifies an input file with an initial N bodies of data. If not specified random data will be created.\n");
} |
44808f6c2485fcd8536cbe1c961baf3d59273d54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Indice2D.h>
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
__global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
* pattern entrelacement
*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
// Debug, optional
// if (TID == 0)
// {
// printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel
// }
//TODO addVector, interleaved access pattern
int s = TID;
while (s < n)
{
ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
s += NB_THREAD;
}
}
/**
* pattern 1<-->1
* hyp: #threads = #vector elements = n
*/
__global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
// TODO addVector pattern 1<-->1
const int TID = Indice2D::tid();
ptrDevW[TID] = ptrDevV1[TID] + ptrDevV2[TID];
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 44808f6c2485fcd8536cbe1c961baf3d59273d54.cu | #include <Indice2D.h>
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
__global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
* pattern entrelacement
*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
// Debug, optional
// if (TID == 0)
// {
// printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel
// }
//TODO addVector, interleaved access pattern
int s = TID;
while (s < n)
{
ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
s += NB_THREAD;
}
}
/**
* pattern 1<-->1
* hyp: #threads = #vector elements = n
*/
__global__ void addVector11(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
// TODO addVector pattern 1<-->1
const int TID = Indice2D::tid();
ptrDevW[TID] = ptrDevV1[TID] + ptrDevV2[TID];
}
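// Editor's note (sketch only): the 1<->1 mapping above has no bounds
// check, so it relies on the launch supplying exactly n threads, e.g.
// something like
//   addVector11<<< n / 256, 256 >>>(ptrDevV1, ptrDevV2, ptrDevW, n);
// with n an exact multiple of 256, whereas the interleaved version above
// works for any grid/block configuration.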
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
6a8043d462fdc54fbb2a5fdf632e8fd5d2beadd0.hip | // !!! This is a file automatically generated by hipify!!!
//=======================================================================
//
// Name : Finite Volume Nonlinear Acoustics GPU Implementation (FiVoNAGI)
//
// Authors : Roberto Velasco Segura and Pablo L. Rend\'on
//
// License : see licence.txt in the root directory of the repository.
//
//=======================================================================
// use 1 for single precision and 2 for double precision
#ifndef PRECISION
#define PRECISION 1
#endif /* PRECISION */
#if PRECISION == 1
#define DATATYPEV float
#define DATATYPET float
#elif PRECISION == 2
#define DATATYPEV double
#define DATATYPET int2
#else /* PRECISION value */
# error unresolved PRECISION value
#endif /* PRECISION value */
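// Editor's note (assumption, based on the usual CUDA texture idiom rather
// than on code shown here): DATATYPET is presumably the texture element
// type. float can be fetched from a texture directly, while double is
// normally bound as int2 and reassembled in the fetch helper with
// __hiloint2double(v.y, v.x), which would explain pairing DATATYPEV double
// with DATATYPET int2.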
#ifndef UNINCLUDE
#include "numbers.h"
#include "parameters.h"
#include "data_definitions.h"
#include "init.h"
#include "boundary.h"
#include "filter.h"
#include "draw_float_cut.h"
#include "data_export.h"
#include "data_collect.h"
#include "source.h"
#include "fv.h"
// cuda
#include "hip/hip_runtime.h"
#include "../nv/cpu_anim.h"
// system
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <math.h>
#include <time.h> // just for the sleep function
#include <iomanip>
#include <hipfft.h>
#endif /* UNINCLUDE */
#ifndef DEBUG
#define DEBUG 0
#endif /* DEBUG */
#include "debug_tools.h"
using namespace std;
// TODO: place the following two functions in a separate file. This is
// needed to include this file as dependence of other h files.
template <typename TTT>
__forceinline__ __device__ TTT getmax(TTT x, TTT y)
{ return (x > y)?x:y; }
template <typename TTT>
__forceinline__ __device__ TTT getmin(TTT x, TTT y)
{ return (x < y)?x:y; }
template <typename TTT>
__global__ void texCpy
(TTT *u1W, TTT *u2W, TTT *u3W, bool dstOut) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int offset = i + j * blockDim.x * gridDim.x;
// this is to avoid writting outside the arrays
if (i > NX-1 || j > NY-1) return;
if (dstOut) {
u1W[offset] = texAu1_read(&i,&j); //CB:rho1
u2W[offset] = texAu2_read(&i,&j);
u3W[offset] = texAu3_read(&i,&j);
} else {
u1W[offset] = texBu1_read(&i,&j); //CB:rho1
u2W[offset] = texBu2_read(&i,&j);
u3W[offset] = texBu3_read(&i,&j);
}
}
template <typename TTT>
__global__ void restore
(TTT *u1W, TTT *u2W, TTT *u3W) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int offset = i + j * blockDim.x * gridDim.x;
// this is to avoid writting outside the arrays
if (i > NX-1 || j > NY-1) return;
u1W[offset] = texCu1_read(&i,&j); //CB:rho1
u2W[offset] = texCu2_read(&i,&j);
u3W[offset] = texCu3_read(&i,&j);
}
template <typename TTT>
__global__ void moveDomain
(TTT *u1W, TTT *u2W, TTT *u3W,
TTT *dx, int *MDX,
TTT *dy, int *MDY,
TTT *T, GPUGD_VARSFD, bool dstOut) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
// this is to avoid writting outside the arrays
if (i > NX-1 || j > NY-1) return;
int offset = i + j * blockDim.x * gridDim.x;
int MDi, MDj;
if ( (*T) >= MDT ) {
// this value is calculated on every thread with the
// same result, could be calculated just once but that
// would require an extra device variable MDi, in that
// case it could be done in the updateMDX kernel
// before calling this kernel.
MDi = static_cast<int>(((*T) - MDT)*MDVX/(*dx))-(*MDX);
MDj = static_cast<int>(((*T) - MDT)*MDVY/(*dy))-(*MDY);
} else {
MDi = 0;
MDj = 0;
}
// even when MDi == MDj == 0 you must copy the values from one
// texture to the other
int iread = i + MDi;
int jread = j + MDj;
if (iread <= NX-1
&& jread <= NY-1) {
if (dstOut) {
u1W[offset] = texAu1_read(&iread,&jread); //CB:rho1
u2W[offset] = texAu2_read(&iread,&jread);
u3W[offset] = texAu3_read(&iread,&jread);
} else {
u1W[offset] = texBu1_read(&iread,&jread); //CB:rho1
u2W[offset] = texBu2_read(&iread,&jread);
u3W[offset] = texBu3_read(&iread,&jread);
}
} else {
if (dstOut) {
u1W[offset] = texAu1_read(&i,&j); //CB:rho1
u2W[offset] = texAu2_read(&i,&j);
u3W[offset] = texAu3_read(&i,&j);
} else {
u1W[offset] = texBu1_read(&i,&j); //CB:rho1
u2W[offset] = texBu2_read(&i,&j);
u3W[offset] = texBu3_read(&i,&j);
}
}
}
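// Editor's note (worked example with made-up numbers): MDi is the number of
// whole cells the moving window has slid in x since the last recorded shift.
// Suppose MDT = 0, MDVX = 1.0 and *dx = 0.01; at *T = 0.057 the target shift
// is int(0.057 * 1.0 / 0.01) = 5 cells, so if *MDX currently records 3 cells
// this pass reads with an offset MDi = 2 and updateMDXY then advances *MDX
// to 5.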
template <typename TTT>
__global__ void updateMDXY
(int *MDX, TTT *dx,
int *MDY, TTT *dy,
TTT *T) {
if ( (*T) >= MDT ) {
int MDi = static_cast<int>(((*T) - MDT)*MDVX/(*dx))-(*MDX);
*MDX = (*MDX) + MDi;
int MDj = static_cast<int>(((*T) - MDT)*MDVY/(*dy))-(*MDY);
*MDY = (*MDY) + MDj;
}
}
// this is one way, maybe a bad one, to do a reduction over cfl
template <typename TTT>
__global__ void reduceCFL
(TTT *cfl, GPUGD_VARSFD) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int sizered = blockDim.x * gridDim.x;
if ( i >= 0 && i <= NX * NY - 1 ) {
cfl[i] = getmax(cfl[i], cfl[i + sizered]);
#if DEBUG == 1
// if ( cfl[i] > CFLMAX ) {
// *debug1 = cfl[i];
// }
#endif /* DEBUG */
}
}
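// Editor's note (reading of the code above; the host loop is not shown
// here): each call folds cfl[i + sizered] into cfl[i] with a max over the
// first sizered entries, so the host presumably relaunches reduceCFL with a
// shrinking grid until the running maximum ends up in cfl[0], which
// reduceCFL2 then compares against CFLMAX.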
// this is the final step of the reduction.
template <typename TTT>
__global__ void reduceCFL2
(TTT *cfl, TTT *dt, TTT *T,
int *simError, GPUGD_VARSFD) {
TTT cflStep = cfl[0];
if(cflStep < CFLMAX) {
// *simError = 0;
} else {
*simError = 1;
}
}
template <typename TTT>
__global__ void update_dt
(TTT *cfl, TTT *dt,
int *simError,
GPUGD_VARSFD) {
TTT cflStep = cfl[0];
TTT new_dt = (*dt) * CFLWHISH / cflStep;
// This conditional behaviour is a patch for some executions where the
// "dt too large" error turns into an infinite loop; indeed the new dt
// is sometimes (a little) larger than the previous one.
if(*simError == 1 && new_dt > (*dt) * 0.9) {
*dt = (*dt) * 0.9;
} else {
*dt = new_dt;
}
}
template <typename TTT>
__global__ void update_T
(TTT *dt, TTT *T, int *n,
GPUGD_VARSFD) {
*T = (*T) + (*dt);
*n = (*n) + 1;
}
void sleepP
(unsigned int mseconds) {
int ticks = mseconds * CLOCKS_PER_SEC / 1000;
clock_t goal = ticks + clock();
while (goal > clock());
}
template <typename TTT>
void calcFrame
( gpu_data<TTT> *gd, int nothing ) {
GPUGD_EC( hipEventRecord( gd->start, 0 ) );
dim3 blocks(NX/16,NY/16);
dim3 threads(16,16);
CPUAnimBitmap *bitmap = gd->bitmap;
dim3 blocksd((NXW/ZOOM)/16,((NYW/ZOOM)+128)/16);
float exec_time_frame;
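  // uOut implements double buffering between the A and B data sets: one
  // set is read while the other is written, and the roles swap
  // (uOut = !uOut) after every accepted time step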
static bool uOut = true;
static TTT T = -TIDE;
static float exec_time_total;
static int n = 0;
static int simError = 0;
static TTT Tprint = T + DTPRINT;
static int frame = 0;
GPUGD_EC( hipMemcpy( gd->dev_simError,
&simError,
sizeof(int),
hipMemcpyHostToDevice ) );
GPUGD_EC( hipMemcpy( gd->dev_frame,
&frame,
sizeof(int),
hipMemcpyHostToDevice ) );
GPUGD_EC( hipMemcpy( gd->dev_n0frame,
&n,
sizeof(int),
hipMemcpyHostToDevice ) );
TTT *u1W;
TTT *u2W;
TTT *u3W;
TTT *u1R;
TTT *u2R;
TTT *u3R;
TTT dt;
int dummyStop = 1;
#if DEBUG >= 1
gpuGridDebuger<TTT> DDD;
TTT debug1 = Num0;
int gridBitSize = NX * NY * sizeof(TTT);
TTT *debug2 = (TTT*)malloc( gridBitSize );
#endif /* DEBUG */
if (initStop && n==0) {
cout << "Continue? answer 0 or 1 \n";
cin >> dummyStop;
if(dummyStop==0) {
clean_gpu(gd);
exit(0);
}
}
// For comparison with CLAWPACK see: clawpack-4.6.1/clawpack/2d/lib/claw2.f
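  // Advance the solution until the next output time: each pass performs a
  // y sweep followed by an x sweep of the finite-volume update, adapts dt
  // from the measured CFL number, and rolls the step back whenever the
  // CFL condition is violated.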
while (T < Tprint) {
if (uOut) {
u1W = gd->dev_SrcBu1;
u2W = gd->dev_SrcBu2;
u3W = gd->dev_SrcBu3;
u1R = gd->dev_SrcAu1;
u2R = gd->dev_SrcAu2;
u3R = gd->dev_SrcAu3;
} else {
u1W = gd->dev_SrcAu1;
u2W = gd->dev_SrcAu2;
u3W = gd->dev_SrcAu3;
u1R = gd->dev_SrcBu1;
u2R = gd->dev_SrcBu2;
u3R = gd->dev_SrcBu3;
}
// TODO: place this block at the end of the step calculation, and
// check the calculation is not affected.
GPUGD_EC( hipMemcpy( &dt,
gd->dev_dt,
sizeof(TTT),
hipMemcpyDeviceToHost ) );
hipLaunchKernelGGL(( texCpy), dim3(blocks),dim3(threads), 0, 0,
gd->dev_SrcCu1, gd->dev_SrcCu2, gd->dev_SrcCu3,
!uOut);
hipLaunchKernelGGL(( boundary), dim3(blocks),dim3(threads), 0, 0,
gd->dev_MDX, gd->dev_MDY,
u1R, u2R, u3R,
gd->dev_dx, gd->dev_dy, gd->dev_dt,
gd->dev_T,
GPUGD_VARSFC,
!uOut);
// GPUGD_COUT("boundary, !uOut", !uOut);
// GPUGD_ADD(DDD,u1R,"q1");
// GPUGD_ADD(DDD,u2R,"q2");
// GPUGD_ADD(DDD,u3R,"q3");
// y-FVM
hipLaunchKernelGGL(( getWavesSpeedsCFL_y), dim3(blocks),dim3(threads), 0, 0,
gd->dev_cfl,
gd->dev_dy, gd->dev_dt,
gd->dev_simError,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("getWavesSpeedsCFL_y, uOut", uOut);
GPUGD_EC( hipMemcpy( &simError,
gd->dev_simError,
sizeof(int),
hipMemcpyDeviceToHost ) );
if (simError == 2) {
printf("ERROR: Negative eigenvalue.\n");
clean_gpu(gd);
exit(2);
}
// GPUGD_COUT("simError", simError);
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
hipLaunchKernelGGL(( step_y), dim3(blocks),dim3(threads), 0, 0,
u1W, u2W, u3W,
gd->dev_dy, gd->dev_dt,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("step_y, uOut", uOut);
// GPUGD_PD1(DDD,"qadd(5,6)");
// GPUGD_ADD(DDD,u1W,"q1step");
// GPUGD_ADD(DDD,u2W,"q2step");
// GPUGD_ADD(DDD,u3W,"q3step");
hipLaunchKernelGGL(( calcLimiters_y), dim3(blocks),dim3(threads), 0, 0,
GPUGD_VARSFC,
gd->dev_dy, gd->dev_dt);
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
hipLaunchKernelGGL(( writeLimiters_y), dim3(blocks),dim3(threads), 0, 0,
u1R, u2R, u3R,
gd->dev_dy, gd->dev_dt,
GPUGD_VARSFC,
!uOut);
// GPUGD_COUT("writeLimiters_y, !uOut", !uOut);
// GPUGD_ADD(DDD,u1R,"q1new");
// GPUGD_ADD(DDD,u2R,"q2new");
// GPUGD_ADD(DDD,u3R,"q3new");
    // always check that at this point the last texture write went to
    // uXR; otherwise insert a texCpy.
// x-FVM
hipLaunchKernelGGL(( getWavesSpeedsCFL_x), dim3(blocks),dim3(threads), 0, 0,
gd->dev_cfl,
gd->dev_dx, gd->dev_dt,
gd->dev_simError,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("getWavesSpeedsCFL_x, uOut", uOut);
GPUGD_EC( hipMemcpy( &simError,
gd->dev_simError,
sizeof(int),
hipMemcpyDeviceToHost ) );
if (simError == 2) {
printf("ERROR: Negative eigenvalue.\n");
clean_gpu(gd);
exit(2);
}
// GPUGD_COUT("simError", simError);
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
hipLaunchKernelGGL(( step_x), dim3(blocks),dim3(threads), 0, 0,
u1W, u2W, u3W,
gd->dev_dx, gd->dev_dt,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("step_x, uOut", uOut);
// GPUGD_PD1(DDD,"qadd(5,6)");
// GPUGD_ADD(DDD,u1W,"q1step");
// GPUGD_ADD(DDD,u2W,"q2step");
// GPUGD_ADD(DDD,u3W,"q3step");
hipLaunchKernelGGL(( calcLimiters_x), dim3(blocks),dim3(threads), 0, 0,
GPUGD_VARSFC,
gd->dev_dx, gd->dev_dt);
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
hipLaunchKernelGGL(( writeLimiters_x), dim3(blocks),dim3(threads), 0, 0,
u1R, u2R, u3R,
gd->dev_dx, gd->dev_dt,
GPUGD_VARSFC,
!uOut);
// GPUGD_COUT("writeLimiters_x, !uOut", !uOut);
// GPUGD_ADD(DDD,u1R,"q1new");
// GPUGD_ADD(DDD,u2R,"q2new");
// GPUGD_ADD(DDD,u3R,"q3new");
/////////// reduction (maximum) over gd->dev_cfl
int sizered = NX * NY;
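    // e.g. with NX * NY = 65536 the first loop launches with sizered =
    // 32768, 16384, ..., 64, 32 (multi-block) and the second with 16, 8,
    // 4, 2 (single block); the loops stop at sizered == 2, so the last
    // pairwise maxima sit in cfl[0] and cfl[1] while reduceCFL2 only
    // inspects cfl[0].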
while(sizered > 32) {
// GPUGD_COUT("sizered :", sizered);
sizered = sizered/2;
hipLaunchKernelGGL(( reduceCFL), dim3(sizered/16),dim3(16), 0, 0,
gd->dev_cfl,
GPUGD_VARSFC);
// GPUGD_PD1(DDD,"debug1");
}
while(sizered > 2) {
// GPUGD_COUT("sizered :", sizered);
sizered = sizered/2;
hipLaunchKernelGGL(( reduceCFL), dim3(1),dim3(sizered), 0, 0,
gd->dev_cfl,
GPUGD_VARSFC);
// GPUGD_PD1(DDD,"debug1");
}
// GPUGD_COUT("sizered :", sizered);
hipLaunchKernelGGL(( reduceCFL2), dim3(1),dim3(1), 0, 0,
gd->dev_cfl, gd->dev_dt,
gd->dev_T, gd->dev_simError,
GPUGD_VARSFC);
// GPUGD_PD1(DDD,"cflStep");
/////////// end of the reduction
GPUGD_EC( hipMemcpy( &simError,
gd->dev_simError,
sizeof(bool),
hipMemcpyDeviceToHost ) );
if ( simError == 0 ) {
hipLaunchKernelGGL(( source), dim3(blocks),dim3(threads), 0, 0,
u1W, u2W, u3W,
gd->dev_dx, gd->dev_dy, gd->dev_dt,
gd->dev_MDX, gd->dev_MDY,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("source, uOut", uOut);
// GPUGD_ADD(DDD,u1W,"q1source");
// GPUGD_ADD(DDD,u2W,"q2source");
// GPUGD_ADD(DDD,u3W,"q3source");
// TODO: implement GPUGD_MEASURE
      // At this point all calculations over the u grids for this time
      // step are done; further processing only uses those values.
hipLaunchKernelGGL(( update_T), dim3(1),dim3(1), 0, 0,
gd->dev_dt, gd->dev_T, gd->dev_n,
GPUGD_VARSFC);
GPUGD_EC( hipMemcpy( &T,
gd->dev_T,
sizeof(TTT),
hipMemcpyDeviceToHost ) );
GPUGD_EC( hipMemcpy( &n,
gd->dev_n,
sizeof(int),
hipMemcpyDeviceToHost ) );
if(frameExport) {
hipLaunchKernelGGL(( dataCollect), dim3(blocks),dim3(threads), 0, 0,
gd->dev_measure1,
gd->dev_T, gd->dev_cfl,
gd->dev_n, gd->dev_frame, gd->dev_n0frame,
gd->dev_dx, gd->dev_dy,
gd->dev_MDX, gd->dev_MDY,
gd->dev_simError,
!uOut);
}
hipLaunchKernelGGL(( moveDomain), dim3(blocks),dim3(threads), 0, 0,
u1R, u2R, u3R,
gd->dev_dx, gd->dev_MDX,
gd->dev_dy, gd->dev_MDY,
gd->dev_T, GPUGD_VARSFC,
!uOut);
// GPUGD_PD1(DDD,"MDi");
hipLaunchKernelGGL(( updateMDXY), dim3(1),dim3(1), 0, 0,
gd->dev_MDX, gd->dev_dx,
gd->dev_MDY, gd->dev_dy,
gd->dev_T);
      // if the last kernel called writes into uXR, X=1,2,3,
      // then call the following kernel to copy the values
      // to uXW.
hipLaunchKernelGGL(( texCpy), dim3(blocks),dim3(threads), 0, 0,
u1W, u2W, u3W, uOut);
GPUGD_EC( hipMemcpy( &simError,
gd->dev_simError,
sizeof(bool),
hipMemcpyDeviceToHost ) );
if ( simError == 4 ) {
printf( "ERROR: something went wrong while executing data_collect \n" );
clean_gpu(gd);
exit(simError);
} else if ( simError != 0 ) {
printf( "ERROR: something went wrong, simulation error: %d \n",
simError );
clean_gpu(gd);
exit(simError);
}
// GPUGD_COUT("n", n);
GPUGD_DISPLAY(DDD,n);
GPUGD_COUT("dt", dt);
GPUGD_COUT("------------ good step finished", 0);
uOut = !uOut;
if(stepPause) sleepP(stepPause);
} else if ( simError == 1 ) {
hipLaunchKernelGGL(( restore), dim3(blocks),dim3(threads), 0, 0,
u1R, u2R, u3R);
printf( "INFO: dt too large: %20.19f \n", dt);
} else {
printf( "ERROR: something went wrong, simulation error: %d \n",
simError );
clean_gpu(gd);
exit(simError);
}
hipLaunchKernelGGL(( update_dt), dim3(1),dim3(1), 0, 0,
gd->dev_cfl, gd->dev_dt,
gd->dev_simError,
GPUGD_VARSFC);
simError = 0;
}
if(frameStop == 1) {
cout << "Enter 0 to exit or just hit ENTER to continue: ";
dummyStop = 1;
string input;
getline( cin, input );
if ( !input.empty() ) {
istringstream stream( input );
stream >> dummyStop;
}
if(dummyStop==0) {
clean_gpu(gd);
exit(0);
}
}
Tprint = Tprint + DTPRINT;
int MDX, MDY;
GPUGD_EC( hipMemcpy( &MDX,
gd->dev_MDX,
sizeof(int),
hipMemcpyDeviceToHost ) );
GPUGD_EC( hipMemcpy( &MDY,
gd->dev_MDY,
sizeof(int),
hipMemcpyDeviceToHost ) );
if(frameExport) {
dataExport
(gd->dev_measure1,
u1W, u2W, u3W,
(hipfftComplex *)gd->dev_spectrum,
&frame, &n, &MDX, &MDY,
&T, 0);
}
  // In this version of the code the FFT is not implemented; the following
  // is experimental code for future versions.
// if ( fftFrame ) {
// hipfftHandle planR2C;
// hipfftPlan2d(&planR2C, NX, NY, HIPFFT_R2C);
// hipfftExecR2C(planR2C, (hipfftReal *)u1W, (hipfftComplex *)gd->dev_spectrum);
// hipfftDestroy(planR2C);
// if(frameExport) {
// dataExport
// (gd->dev_measure1,
// u1W, u2W, u3W,
// (hipfftComplex *)gd->dev_spectrum,
// &frame, &n, &MDX, &MDY,
// &T, 1);
// }
// filter<<<blocks,threads>>>((hipfftComplex *)gd->dev_spectrum);
// if(frameExport) {
// dataExport
// (gd->dev_measure1,
// u1W, u2W, u3W,
// (hipfftComplex *)gd->dev_spectrum,
// &frame, &n, &MDX, &MDY,
// &T, 2);
// }
// hipfftHandle planC2R;
// hipfftPlan2d(&planC2R, NX, NY, HIPFFT_C2R);
// hipfftExecC2R(planC2R, (hipfftComplex*)gd->dev_spectrum, (hipfftReal*)gd->dev_spectrum);
// hipfftDestroy(planC2R);
// if(frameExport) {
// dataExport
// (gd->dev_measure1,
// u1W, u2W, u3W,
// (hipfftComplex *)gd->dev_spectrum,
// &frame, &n, &MDX, &MDY,
// &T, 3);
// }
// }
if(display) {
hipLaunchKernelGGL(( draw), dim3(blocksd),dim3(threads), 0, 0,
gd->dev_MDX, gd->dev_MDY,
gd->dev_draw,
(hipfftComplex *)gd->dev_spectrum,
gd->dev_dx, gd->dev_dy,
gd->dev_dt, gd->dev_T,
uOut);
// GPUGD_COUT("draw, uOut", uOut);
hipLaunchKernelGGL(( float_to_color), dim3(blocksd),dim3(threads), 0, 0,
gd->output_bitmap, gd->dev_draw);
GPUGD_EC( hipMemcpy( bitmap->get_ptr(),
gd->output_bitmap,
bitmap->image_size(),
hipMemcpyDeviceToHost ) );
}
GPUGD_EC( hipEventRecord( gd->end, 0 ) );
GPUGD_EC( hipEventSynchronize( gd->end ) );
GPUGD_EC( hipEventElapsedTime( &exec_time_frame,
gd->start, gd->end ) );
exec_time_total += exec_time_frame;
  // frames are numbered starting at zero.
cout << "Execution time per frame = " << exec_time_total/static_cast<float>(frame+1) << " ms" << endl;
cout << "T = " << T << ", n = " << n << ", frame = " << frame << endl;
if(finalTime > 0.0 && T > finalTime){
cout << "final time reached" << endl;
// cin >> dummyStop;
clean_gpu(gd);
exit(0);
}
GPUGD_PRINT_INT_TOKEN(PRECISION);
GPUGD_COUT("---------- frame displayed",0);
frame++;
#if DEBUG >= 1
free( debug2 );
#endif /* DEBUG */
}
int main
() {
cout << "AMPL = " << AMPL << endl;
cout << "ETA = " << ETA << endl;
printf("NX = %d :: NY = %d \n", NX, NY);
cout << "display = " << display << endl;
GPUGD_PRINT_INT_TOKEN(PRECISION);
GPUGD_PRINT_STR_TOKEN(DATATYPEV);
GPUGD_PRINT_STR_TOKEN(DATATYPET);
GPUGD_COUT("sizeof(DATATYPEV)", sizeof(DATATYPEV) );
cout << "Precision: ";
if (PRECISION == 1) cout << "single" << endl;
if (PRECISION == 2) cout << "double" << endl;
gpu_data<DATATYPEV> gd;
GPUGD_EC( hipEventCreate( &gd.start ) );
GPUGD_EC( hipEventCreate( &gd.end ) );
int gridBitSize = NX * NY * sizeof(DATATYPEV);
cout << "gridBitSize = " << gridBitSize << endl;
DATATYPEV *dt = new DATATYPEV;
DATATYPEV *dx = new DATATYPEV;
DATATYPEV *dy = new DATATYPEV;
DATATYPEV *T = new DATATYPEV;
int *MDX = new int;
int *MDY = new int;
DATATYPEV *debug1 = new DATATYPEV;
*dt = DTINI;
*dx = XMAX/(NX-1);
*dy = YMAX/(NY-1);
printf("dx = %12.5e :: dy = %12.5e \n", *dx, *dy);
*T = - TIDE;
*MDX = 0;
*MDY = 0;
DATATYPEV *u1 = (DATATYPEV*)malloc( gridBitSize );
DATATYPEV *u2 = (DATATYPEV*)malloc( gridBitSize );
DATATYPEV *u3 = (DATATYPEV*)malloc( gridBitSize );
#if DEBUG >= 1
*debug1 = Num0;
DATATYPEV *debug2 = (DATATYPEV*)malloc( gridBitSize );
#endif /* DEBUG */
// initial values in cpu variables
init
(u1,u2,u3,
dx,dy,dt
#if DEBUG >= 1
,debug1,debug2
#else
,none
#endif /* DEBUG */
);
#if DEBUG >= 1
delete debug1;
free( debug2 );
#endif /* DEBUG */
char name[] = "FiVoNAGI";
if(display) {
CPUAnimBitmap bitmap( NXW / ZOOM, (NYW / ZOOM) + 128, &gd );
gd.bitmap = &bitmap;
int imageSize = bitmap.image_size();
cout << "imageSize = " << imageSize << endl;
int drawSize = (NXW/ZOOM)*((NYW/ZOOM)+128)*sizeof(float);
cout << "drawSize = " << drawSize << endl;
// gpu memory allocation and initial values copy from cpu to gpu
gpu_init
(&gd,
gridBitSize, imageSize, drawSize,
dt, dx, dy,
T, MDX, MDY,
u1, u2, u3
#if DEBUG >= 1
,debug1,debug2
#else
,none
#endif /* DEBUG */
);
delete dt;
delete dx;
delete dy;
delete T;
delete MDX;
delete MDY;
free( u1 );
free( u2 );
free( u3 );
bitmap.anim_and_exit( (void (*)(void*,int))calcFrame<DATATYPEV>,
(void (*)(void*))clean_gpu<DATATYPEV>,
name );
} else { // no display
// gpu memory allocation and initial values copy from cpu to gpu
gpu_init
(&gd,
gridBitSize, 0, 0,
dt, dx, dy,
T, MDX, MDY,
u1, u2, u3
#if DEBUG >= 1
,debug1,debug2
#else
,none
#endif /* DEBUG */
);
delete dt;
delete dx;
delete dy;
delete T;
delete MDX;
delete MDY;
free( u1 );
free( u2 );
free( u3 );
try {
while(1) {
calcFrame(&gd,1);
}
} catch (...) {
cout << "An exception occurred." << '\n';
clean_gpu(&gd);
}
}
}
// The following explicit instantiations are needed because clean_gpu and
// calcFrame are only called through function pointers, so the compiler
// cannot deduce which template instance to compile; these lines tell it.
template void clean_gpu<DATATYPEV>( gpu_data<DATATYPEV> *gd );
template void calcFrame<DATATYPEV>( gpu_data<DATATYPEV> *gd, int nothing );
| 6a8043d462fdc54fbb2a5fdf632e8fd5d2beadd0.cu | //=======================================================================
//
// Name : Finite Volume Nonlinear Acoustics GPU Implementation (FiVoNAGI)
//
// Authors : Roberto Velasco Segura and Pablo L. Rend\'on
//
// License : see licence.txt in the root directory of the repository.
//
//=======================================================================
// use 1 for single precision and 2 for double precision
#ifndef PRECISION
#define PRECISION 1
#endif /* PRECISION */
#if PRECISION == 1
#define DATATYPEV float
#define DATATYPET float
#elif PRECISION == 2
#define DATATYPEV double
#define DATATYPET int2
#else /* PRECISION value */
# error unresolved PRECISION value
#endif /* PRECISION value */
#ifndef UNINCLUDE
#include "numbers.h"
#include "parameters.h"
#include "data_definitions.h"
#include "init.h"
#include "boundary.h"
#include "filter.h"
#include "draw_float_cut.h"
#include "data_export.h"
#include "data_collect.h"
#include "source.h"
#include "fv.h"
// cuda
#include "cuda.h"
#include "../nv/cpu_anim.h"
// system
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <math.h>
#include <time.h> // just for the sleep function
#include <iomanip>
#include <cufft.h>
#endif /* UNINCLUDE */
#ifndef DEBUG
#define DEBUG 0
#endif /* DEBUG */
#include "debug_tools.h"
using namespace std;
// TODO: place the following two functions in a separate file. This is
// needed to include this file as dependence of other h files.
template <typename TTT>
__forceinline__ __device__ TTT getmax(TTT x, TTT y)
{ return (x > y)?x:y; }
template <typename TTT>
__forceinline__ __device__ TTT getmin(TTT x, TTT y)
{ return (x < y)?x:y; }
template <typename TTT>
__global__ void texCpy
(TTT *u1W, TTT *u2W, TTT *u3W, bool dstOut) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int offset = i + j * blockDim.x * gridDim.x;
  // this is to avoid writing outside the arrays
if (i > NX-1 || j > NY-1) return;
if (dstOut) {
u1W[offset] = texAu1_read(&i,&j); //CB:rho1
u2W[offset] = texAu2_read(&i,&j);
u3W[offset] = texAu3_read(&i,&j);
} else {
u1W[offset] = texBu1_read(&i,&j); //CB:rho1
u2W[offset] = texBu2_read(&i,&j);
u3W[offset] = texBu3_read(&i,&j);
}
}
template <typename TTT>
__global__ void restore
(TTT *u1W, TTT *u2W, TTT *u3W) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int offset = i + j * blockDim.x * gridDim.x;
// this is to avoid writting outside the arrays
if (i > NX-1 || j > NY-1) return;
u1W[offset] = texCu1_read(&i,&j); //CB:rho1
u2W[offset] = texCu2_read(&i,&j);
u3W[offset] = texCu3_read(&i,&j);
}
template <typename TTT>
__global__ void moveDomain
(TTT *u1W, TTT *u2W, TTT *u3W,
TTT *dx, int *MDX,
TTT *dy, int *MDY,
TTT *T, GPUGD_VARSFD, bool dstOut) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
  // this is to avoid writing outside the arrays
if (i > NX-1 || j > NY-1) return;
int offset = i + j * blockDim.x * gridDim.x;
int MDi, MDj;
if ( (*T) >= MDT ) {
    // this value is calculated on every thread with the same result; it
    // could be calculated just once, but that would require an extra
    // device variable MDi, in which case it could be done in the
    // updateMDXY kernel before calling this kernel.
MDi = static_cast<int>(((*T) - MDT)*MDVX/(*dx))-(*MDX);
MDj = static_cast<int>(((*T) - MDT)*MDVY/(*dy))-(*MDY);
} else {
MDi = 0;
MDj = 0;
}
// even when MDi == MDj == 0 you must copy the values from one
// texture to the other
int iread = i + MDi;
int jread = j + MDj;
if (iread <= NX-1
&& jread <= NY-1) {
if (dstOut) {
u1W[offset] = texAu1_read(&iread,&jread); //CB:rho1
u2W[offset] = texAu2_read(&iread,&jread);
u3W[offset] = texAu3_read(&iread,&jread);
} else {
u1W[offset] = texBu1_read(&iread,&jread); //CB:rho1
u2W[offset] = texBu2_read(&iread,&jread);
u3W[offset] = texBu3_read(&iread,&jread);
}
} else {
if (dstOut) {
u1W[offset] = texAu1_read(&i,&j); //CB:rho1
u2W[offset] = texAu2_read(&i,&j);
u3W[offset] = texAu3_read(&i,&j);
} else {
u1W[offset] = texBu1_read(&i,&j); //CB:rho1
u2W[offset] = texBu2_read(&i,&j);
u3W[offset] = texBu3_read(&i,&j);
}
}
}
template <typename TTT>
__global__ void updateMDXY
(int *MDX, TTT *dx,
int *MDY, TTT *dy,
TTT *T) {
if ( (*T) >= MDT ) {
int MDi = static_cast<int>(((*T) - MDT)*MDVX/(*dx))-(*MDX);
*MDX = (*MDX) + MDi;
int MDj = static_cast<int>(((*T) - MDT)*MDVY/(*dy))-(*MDY);
*MDY = (*MDY) + MDj;
}
}
// this is one way, maybe a bad one, to perform the maximum reduction over cfl
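// Each launch folds the upper half of the still-active range into the
// lower half: thread i stores max(cfl[i], cfl[i + sizered]), where
// sizered is the total thread count of that launch. The host loop in
// calcFrame keeps halving sizered between launches.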
template <typename TTT>
__global__ void reduceCFL
(TTT *cfl, GPUGD_VARSFD) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int sizered = blockDim.x * gridDim.x;
if ( i >= 0 && i <= NX * NY - 1 ) {
cfl[i] = getmax(cfl[i], cfl[i + sizered]);
#if DEBUG == 1
// if ( cfl[i] > CFLMAX ) {
// *debug1 = cfl[i];
// }
#endif /* DEBUG */
}
}
// this is the final step of the reduction.
template <typename TTT>
__global__ void reduceCFL2
(TTT *cfl, TTT *dt, TTT *T,
int *simError, GPUGD_VARSFD) {
TTT cflStep = cfl[0];
if(cflStep < CFLMAX) {
// *simError = 0;
} else {
*simError = 1;
}
}
template <typename TTT>
__global__ void update_dt
(TTT *cfl, TTT *dt,
int *simError,
GPUGD_VARSFD) {
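  // rescale dt so that the next step's CFL number lands near CFLWHISH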
TTT cflStep = cfl[0];
TTT new_dt = (*dt) * CFLWHISH / cflStep;
  // This conditional behaviour is a patch for some executions where the
  // "dt too large" error turns into an infinite loop because the new dt
  // is sometimes (slightly) larger than the previous one.
if(*simError == 1 && new_dt > (*dt) * 0.9) {
*dt = (*dt) * 0.9;
} else {
*dt = new_dt;
}
}
template <typename TTT>
__global__ void update_T
(TTT *dt, TTT *T, int *n,
GPUGD_VARSFD) {
*T = (*T) + (*dt);
*n = (*n) + 1;
}
void sleepP
(unsigned int mseconds) {
int ticks = mseconds * CLOCKS_PER_SEC / 1000;
clock_t goal = ticks + clock();
while (goal > clock());
}
template <typename TTT>
void calcFrame
( gpu_data<TTT> *gd, int nothing ) {
GPUGD_EC( cudaEventRecord( gd->start, 0 ) );
dim3 blocks(NX/16,NY/16);
dim3 threads(16,16);
CPUAnimBitmap *bitmap = gd->bitmap;
dim3 blocksd((NXW/ZOOM)/16,((NYW/ZOOM)+128)/16);
float exec_time_frame;
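  // uOut implements double buffering between the A and B data sets: one
  // set is read while the other is written, and the roles swap
  // (uOut = !uOut) after every accepted time step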
static bool uOut = true;
static TTT T = -TIDE;
static float exec_time_total;
static int n = 0;
static int simError = 0;
static TTT Tprint = T + DTPRINT;
static int frame = 0;
GPUGD_EC( cudaMemcpy( gd->dev_simError,
&simError,
sizeof(int),
cudaMemcpyHostToDevice ) );
GPUGD_EC( cudaMemcpy( gd->dev_frame,
&frame,
sizeof(int),
cudaMemcpyHostToDevice ) );
GPUGD_EC( cudaMemcpy( gd->dev_n0frame,
&n,
sizeof(int),
cudaMemcpyHostToDevice ) );
TTT *u1W;
TTT *u2W;
TTT *u3W;
TTT *u1R;
TTT *u2R;
TTT *u3R;
TTT dt;
int dummyStop = 1;
#if DEBUG >= 1
gpuGridDebuger<TTT> DDD;
TTT debug1 = Num0;
int gridBitSize = NX * NY * sizeof(TTT);
TTT *debug2 = (TTT*)malloc( gridBitSize );
#endif /* DEBUG */
if (initStop && n==0) {
cout << "Continue? answer 0 or 1 \n";
cin >> dummyStop;
if(dummyStop==0) {
clean_gpu(gd);
exit(0);
}
}
// For comparison with CLAWPACK see: clawpack-4.6.1/clawpack/2d/lib/claw2.f
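  // Advance the solution until the next output time: each pass performs a
  // y sweep followed by an x sweep of the finite-volume update, adapts dt
  // from the measured CFL number, and rolls the step back whenever the
  // CFL condition is violated.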
while (T < Tprint) {
if (uOut) {
u1W = gd->dev_SrcBu1;
u2W = gd->dev_SrcBu2;
u3W = gd->dev_SrcBu3;
u1R = gd->dev_SrcAu1;
u2R = gd->dev_SrcAu2;
u3R = gd->dev_SrcAu3;
} else {
u1W = gd->dev_SrcAu1;
u2W = gd->dev_SrcAu2;
u3W = gd->dev_SrcAu3;
u1R = gd->dev_SrcBu1;
u2R = gd->dev_SrcBu2;
u3R = gd->dev_SrcBu3;
}
// TODO: place this block at the end of the step calculation, and
// check the calculation is not affected.
GPUGD_EC( cudaMemcpy( &dt,
gd->dev_dt,
sizeof(TTT),
cudaMemcpyDeviceToHost ) );
texCpy<<<blocks,threads>>>
(gd->dev_SrcCu1, gd->dev_SrcCu2, gd->dev_SrcCu3,
!uOut);
boundary<<<blocks,threads>>>
(gd->dev_MDX, gd->dev_MDY,
u1R, u2R, u3R,
gd->dev_dx, gd->dev_dy, gd->dev_dt,
gd->dev_T,
GPUGD_VARSFC,
!uOut);
// GPUGD_COUT("boundary, !uOut", !uOut);
// GPUGD_ADD(DDD,u1R,"q1");
// GPUGD_ADD(DDD,u2R,"q2");
// GPUGD_ADD(DDD,u3R,"q3");
// y-FVM
getWavesSpeedsCFL_y<<<blocks,threads>>>
(gd->dev_cfl,
gd->dev_dy, gd->dev_dt,
gd->dev_simError,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("getWavesSpeedsCFL_y, uOut", uOut);
GPUGD_EC( cudaMemcpy( &simError,
gd->dev_simError,
sizeof(int),
cudaMemcpyDeviceToHost ) );
if (simError == 2) {
printf("ERROR: Negative eigenvalue.\n");
clean_gpu(gd);
exit(2);
}
// GPUGD_COUT("simError", simError);
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
step_y<<<blocks,threads>>>
(u1W, u2W, u3W,
gd->dev_dy, gd->dev_dt,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("step_y, uOut", uOut);
// GPUGD_PD1(DDD,"qadd(5,6)");
// GPUGD_ADD(DDD,u1W,"q1step");
// GPUGD_ADD(DDD,u2W,"q2step");
// GPUGD_ADD(DDD,u3W,"q3step");
calcLimiters_y<<<blocks,threads>>>
(GPUGD_VARSFC,
gd->dev_dy, gd->dev_dt);
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
writeLimiters_y<<<blocks,threads>>>
(u1R, u2R, u3R,
gd->dev_dy, gd->dev_dt,
GPUGD_VARSFC,
!uOut);
// GPUGD_COUT("writeLimiters_y, !uOut", !uOut);
// GPUGD_ADD(DDD,u1R,"q1new");
// GPUGD_ADD(DDD,u2R,"q2new");
// GPUGD_ADD(DDD,u3R,"q3new");
    // always check that at this point the last texture write went to
    // uXR; otherwise insert a texCpy.
// x-FVM
getWavesSpeedsCFL_x<<<blocks,threads>>>
(gd->dev_cfl,
gd->dev_dx, gd->dev_dt,
gd->dev_simError,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("getWavesSpeedsCFL_x, uOut", uOut);
GPUGD_EC( cudaMemcpy( &simError,
gd->dev_simError,
sizeof(int),
cudaMemcpyDeviceToHost ) );
if (simError == 2) {
printf("ERROR: Negative eigenvalue.\n");
clean_gpu(gd);
exit(2);
}
// GPUGD_COUT("simError", simError);
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_s1,"s1");
// GPUGD_ADD(DDD,gd->dev_s2,"s2");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_W11,"----W11");
// GPUGD_ADD(DDD,gd->dev_W21,"W21");
// GPUGD_ADD(DDD,gd->dev_W12,"W12");
// GPUGD_ADD(DDD,gd->dev_W22,"W22");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"--amdq1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"amdq2");
// GPUGD_ADD(DDD,gd->dev_apdq1,"apdq1");
// GPUGD_ADD(DDD,gd->dev_apdq2,"apdq2");
step_x<<<blocks,threads>>>
(u1W, u2W, u3W,
gd->dev_dx, gd->dev_dt,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("step_x, uOut", uOut);
// GPUGD_PD1(DDD,"qadd(5,6)");
// GPUGD_ADD(DDD,u1W,"q1step");
// GPUGD_ADD(DDD,u2W,"q2step");
// GPUGD_ADD(DDD,u3W,"q3step");
calcLimiters_x<<<blocks,threads>>>
(GPUGD_VARSFC,
gd->dev_dx, gd->dev_dt);
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
// GPUGD_ADD(DDD,gd->dev_amdq1,"fadd1");
// GPUGD_ADD(DDD,gd->dev_amdq2,"fadd2");
writeLimiters_x<<<blocks,threads>>>
(u1R, u2R, u3R,
gd->dev_dx, gd->dev_dt,
GPUGD_VARSFC,
!uOut);
// GPUGD_COUT("writeLimiters_x, !uOut", !uOut);
// GPUGD_ADD(DDD,u1R,"q1new");
// GPUGD_ADD(DDD,u2R,"q2new");
// GPUGD_ADD(DDD,u3R,"q3new");
/////////// reduction (maximum) over gd->dev_cfl
int sizered = NX * NY;
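    // e.g. with NX * NY = 65536 the first loop launches with sizered =
    // 32768, 16384, ..., 64, 32 (multi-block) and the second with 16, 8,
    // 4, 2 (single block); the loops stop at sizered == 2, so the last
    // pairwise maxima sit in cfl[0] and cfl[1] while reduceCFL2 only
    // inspects cfl[0].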
while(sizered > 32) {
// GPUGD_COUT("sizered :", sizered);
sizered = sizered/2;
reduceCFL<<<sizered/16,16>>>
(gd->dev_cfl,
GPUGD_VARSFC);
// GPUGD_PD1(DDD,"debug1");
}
while(sizered > 2) {
// GPUGD_COUT("sizered :", sizered);
sizered = sizered/2;
reduceCFL<<<1,sizered>>>
(gd->dev_cfl,
GPUGD_VARSFC);
// GPUGD_PD1(DDD,"debug1");
}
// GPUGD_COUT("sizered :", sizered);
reduceCFL2<<<1,1>>>
(gd->dev_cfl, gd->dev_dt,
gd->dev_T, gd->dev_simError,
GPUGD_VARSFC);
// GPUGD_PD1(DDD,"cflStep");
/////////// end of the reduction
GPUGD_EC( cudaMemcpy( &simError,
gd->dev_simError,
sizeof(bool),
cudaMemcpyDeviceToHost ) );
if ( simError == 0 ) {
source<<<blocks,threads>>>
(u1W, u2W, u3W,
gd->dev_dx, gd->dev_dy, gd->dev_dt,
gd->dev_MDX, gd->dev_MDY,
GPUGD_VARSFC,
uOut);
// GPUGD_COUT("source, uOut", uOut);
// GPUGD_ADD(DDD,u1W,"q1source");
// GPUGD_ADD(DDD,u2W,"q2source");
// GPUGD_ADD(DDD,u3W,"q3source");
// TODO: implement GPUGD_MEASURE
      // At this point all calculations over the u grids for this time
      // step are done; further processing only uses those values.
update_T<<<1,1>>>
(gd->dev_dt, gd->dev_T, gd->dev_n,
GPUGD_VARSFC);
GPUGD_EC( cudaMemcpy( &T,
gd->dev_T,
sizeof(TTT),
cudaMemcpyDeviceToHost ) );
GPUGD_EC( cudaMemcpy( &n,
gd->dev_n,
sizeof(int),
cudaMemcpyDeviceToHost ) );
if(frameExport) {
dataCollect<<<blocks,threads>>>
(gd->dev_measure1,
gd->dev_T, gd->dev_cfl,
gd->dev_n, gd->dev_frame, gd->dev_n0frame,
gd->dev_dx, gd->dev_dy,
gd->dev_MDX, gd->dev_MDY,
gd->dev_simError,
!uOut);
}
moveDomain<<<blocks,threads>>>
(u1R, u2R, u3R,
gd->dev_dx, gd->dev_MDX,
gd->dev_dy, gd->dev_MDY,
gd->dev_T, GPUGD_VARSFC,
!uOut);
// GPUGD_PD1(DDD,"MDi");
updateMDXY<<<1,1>>>
(gd->dev_MDX, gd->dev_dx,
gd->dev_MDY, gd->dev_dy,
gd->dev_T);
      // if the last kernel called writes into uXR, X=1,2,3,
      // then call the following kernel to copy the values
      // to uXW.
texCpy<<<blocks,threads>>>
(u1W, u2W, u3W, uOut);
GPUGD_EC( cudaMemcpy( &simError,
gd->dev_simError,
sizeof(bool),
cudaMemcpyDeviceToHost ) );
if ( simError == 4 ) {
printf( "ERROR: something went wrong while executing data_collect \n" );
clean_gpu(gd);
exit(simError);
} else if ( simError != 0 ) {
printf( "ERROR: something went wrong, simulation error: %d \n",
simError );
clean_gpu(gd);
exit(simError);
}
// GPUGD_COUT("n", n);
GPUGD_DISPLAY(DDD,n);
GPUGD_COUT("dt", dt);
GPUGD_COUT("------------ good step finished", 0);
uOut = !uOut;
if(stepPause) sleepP(stepPause);
} else if ( simError == 1 ) {
restore<<<blocks,threads>>>
(u1R, u2R, u3R);
printf( "INFO: dt too large: %20.19f \n", dt);
} else {
printf( "ERROR: something went wrong, simulation error: %d \n",
simError );
clean_gpu(gd);
exit(simError);
}
update_dt<<<1,1>>>
(gd->dev_cfl, gd->dev_dt,
gd->dev_simError,
GPUGD_VARSFC);
simError = 0;
}
if(frameStop == 1) {
cout << "Enter 0 to exit or just hit ENTER to continue: ";
dummyStop = 1;
string input;
getline( cin, input );
if ( !input.empty() ) {
istringstream stream( input );
stream >> dummyStop;
}
if(dummyStop==0) {
clean_gpu(gd);
exit(0);
}
}
Tprint = Tprint + DTPRINT;
int MDX, MDY;
GPUGD_EC( cudaMemcpy( &MDX,
gd->dev_MDX,
sizeof(int),
cudaMemcpyDeviceToHost ) );
GPUGD_EC( cudaMemcpy( &MDY,
gd->dev_MDY,
sizeof(int),
cudaMemcpyDeviceToHost ) );
if(frameExport) {
dataExport
(gd->dev_measure1,
u1W, u2W, u3W,
(cufftComplex *)gd->dev_spectrum,
&frame, &n, &MDX, &MDY,
&T, 0);
}
  // In this version of the code the FFT is not implemented; the following
  // is experimental code for future versions.
// if ( fftFrame ) {
// cufftHandle planR2C;
// cufftPlan2d(&planR2C, NX, NY, CUFFT_R2C);
// cufftExecR2C(planR2C, (cufftReal *)u1W, (cufftComplex *)gd->dev_spectrum);
// cufftDestroy(planR2C);
// if(frameExport) {
// dataExport
// (gd->dev_measure1,
// u1W, u2W, u3W,
// (cufftComplex *)gd->dev_spectrum,
// &frame, &n, &MDX, &MDY,
// &T, 1);
// }
// filter<<<blocks,threads>>>((cufftComplex *)gd->dev_spectrum);
// if(frameExport) {
// dataExport
// (gd->dev_measure1,
// u1W, u2W, u3W,
// (cufftComplex *)gd->dev_spectrum,
// &frame, &n, &MDX, &MDY,
// &T, 2);
// }
// cufftHandle planC2R;
// cufftPlan2d(&planC2R, NX, NY, CUFFT_C2R);
// cufftExecC2R(planC2R, (cufftComplex*)gd->dev_spectrum, (cufftReal*)gd->dev_spectrum);
// cufftDestroy(planC2R);
// if(frameExport) {
// dataExport
// (gd->dev_measure1,
// u1W, u2W, u3W,
// (cufftComplex *)gd->dev_spectrum,
// &frame, &n, &MDX, &MDY,
// &T, 3);
// }
// }
if(display) {
draw<<<blocksd,threads>>>
(gd->dev_MDX, gd->dev_MDY,
gd->dev_draw,
(cufftComplex *)gd->dev_spectrum,
gd->dev_dx, gd->dev_dy,
gd->dev_dt, gd->dev_T,
uOut);
// GPUGD_COUT("draw, uOut", uOut);
float_to_color<<<blocksd,threads>>>
(gd->output_bitmap, gd->dev_draw);
GPUGD_EC( cudaMemcpy( bitmap->get_ptr(),
gd->output_bitmap,
bitmap->image_size(),
cudaMemcpyDeviceToHost ) );
}
GPUGD_EC( cudaEventRecord( gd->end, 0 ) );
GPUGD_EC( cudaEventSynchronize( gd->end ) );
GPUGD_EC( cudaEventElapsedTime( &exec_time_frame,
gd->start, gd->end ) );
exec_time_total += exec_time_frame;
  // frames are numbered starting at zero.
cout << "Execution time per frame = " << exec_time_total/static_cast<float>(frame+1) << " ms" << endl;
cout << "T = " << T << ", n = " << n << ", frame = " << frame << endl;
if(finalTime > 0.0 && T > finalTime){
cout << "final time reached" << endl;
// cin >> dummyStop;
clean_gpu(gd);
exit(0);
}
GPUGD_PRINT_INT_TOKEN(PRECISION);
GPUGD_COUT("---------- frame displayed",0);
frame++;
#if DEBUG >= 1
free( debug2 );
#endif /* DEBUG */
}
int main
() {
cout << "AMPL = " << AMPL << endl;
cout << "ETA = " << ETA << endl;
printf("NX = %d :: NY = %d \n", NX, NY);
cout << "display = " << display << endl;
GPUGD_PRINT_INT_TOKEN(PRECISION);
GPUGD_PRINT_STR_TOKEN(DATATYPEV);
GPUGD_PRINT_STR_TOKEN(DATATYPET);
GPUGD_COUT("sizeof(DATATYPEV)", sizeof(DATATYPEV) );
cout << "Precision: ";
if (PRECISION == 1) cout << "single" << endl;
if (PRECISION == 2) cout << "double" << endl;
gpu_data<DATATYPEV> gd;
GPUGD_EC( cudaEventCreate( &gd.start ) );
GPUGD_EC( cudaEventCreate( &gd.end ) );
int gridBitSize = NX * NY * sizeof(DATATYPEV);
cout << "gridBitSize = " << gridBitSize << endl;
DATATYPEV *dt = new DATATYPEV;
DATATYPEV *dx = new DATATYPEV;
DATATYPEV *dy = new DATATYPEV;
DATATYPEV *T = new DATATYPEV;
int *MDX = new int;
int *MDY = new int;
DATATYPEV *debug1 = new DATATYPEV;
*dt = DTINI;
*dx = XMAX/(NX-1);
*dy = YMAX/(NY-1);
printf("dx = %12.5e :: dy = %12.5e \n", *dx, *dy);
*T = - TIDE;
*MDX = 0;
*MDY = 0;
DATATYPEV *u1 = (DATATYPEV*)malloc( gridBitSize );
DATATYPEV *u2 = (DATATYPEV*)malloc( gridBitSize );
DATATYPEV *u3 = (DATATYPEV*)malloc( gridBitSize );
#if DEBUG >= 1
*debug1 = Num0;
DATATYPEV *debug2 = (DATATYPEV*)malloc( gridBitSize );
#endif /* DEBUG */
// initial values in cpu variables
init
(u1,u2,u3,
dx,dy,dt
#if DEBUG >= 1
,debug1,debug2
#else
,none
#endif /* DEBUG */
);
#if DEBUG >= 1
delete debug1;
free( debug2 );
#endif /* DEBUG */
char name[] = "FiVoNAGI";
if(display) {
CPUAnimBitmap bitmap( NXW / ZOOM, (NYW / ZOOM) + 128, &gd );
gd.bitmap = &bitmap;
int imageSize = bitmap.image_size();
cout << "imageSize = " << imageSize << endl;
int drawSize = (NXW/ZOOM)*((NYW/ZOOM)+128)*sizeof(float);
cout << "drawSize = " << drawSize << endl;
// gpu memory allocation and initial values copy from cpu to gpu
gpu_init
(&gd,
gridBitSize, imageSize, drawSize,
dt, dx, dy,
T, MDX, MDY,
u1, u2, u3
#if DEBUG >= 1
,debug1,debug2
#else
,none
#endif /* DEBUG */
);
delete dt;
delete dx;
delete dy;
delete T;
delete MDX;
delete MDY;
free( u1 );
free( u2 );
free( u3 );
bitmap.anim_and_exit( (void (*)(void*,int))calcFrame<DATATYPEV>,
(void (*)(void*))clean_gpu<DATATYPEV>,
name );
} else { // no display
// gpu memory allocation and initial values copy from cpu to gpu
gpu_init
(&gd,
gridBitSize, 0, 0,
dt, dx, dy,
T, MDX, MDY,
u1, u2, u3
#if DEBUG >= 1
,debug1,debug2
#else
,none
#endif /* DEBUG */
);
delete dt;
delete dx;
delete dy;
delete T;
delete MDX;
delete MDY;
free( u1 );
free( u2 );
free( u3 );
try {
while(1) {
calcFrame(&gd,1);
}
} catch (...) {
cout << "An exception occurred." << '\n';
clean_gpu(&gd);
}
}
}
// The following explicit instantiations are needed because clean_gpu and
// calcFrame are only called through function pointers, so the compiler
// cannot deduce which template instance to compile; these lines tell it.
template void clean_gpu<DATATYPEV>( gpu_data<DATATYPEV> *gd );
template void calcFrame<DATATYPEV>( gpu_data<DATATYPEV> *gd, int nothing );
|
8b7b7ba7a13f151f119e5300f504182873601f62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <vector>
#include <fstream>
#include <assert.h>
#include "../benchmark_common.h"
#include "kernel.h"
void read_parameters(char* filename, int* iterations, int* jump, int* stride,
int* blocks, int* threads)
{
std::vector<std::string> vecOfStrs;
std::ifstream in(filename);
std::string str;
while (std::getline(in, str))
{
if(str.size() > 0)
vecOfStrs.push_back(str);
}
assert(vecOfStrs.size() >= 5);
*iterations = std::atoi(vecOfStrs[0].c_str());
*jump = std::atoi(vecOfStrs[1].c_str());
*stride = std::atoi(vecOfStrs[2].c_str());
*blocks = std::atoi(vecOfStrs[3].c_str());
*threads = std::atoi(vecOfStrs[4].c_str());
}
int main_micro(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag)
{
int iterations, jump, stride, blocks, threads;
read_parameters("MICRO/parameters", &iterations, &jump, &stride, &blocks,
&threads);
printf("%d\t%d\t%d\t%d\t%d\n", iterations, jump, stride, blocks, threads);
int size = stride * jump * blocks * threads;
int *host_array_a, *host_array_b;
int *device_array_a, *device_array_b;
host_array_a = (int*) malloc(sizeof(int) * size);
host_array_b = (int*) malloc(sizeof(int) * size);
hipMalloc((void **)&device_array_a, size*sizeof(int));
hipMalloc((void **)&device_array_b, size*sizeof(int));
hipMemcpyAsync(device_array_a, host_array_a, size*sizeof(int),
hipMemcpyHostToDevice, stream_app);
void (*kernel) (int*, int*, int, int, int) = &kernel_strided;
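  // pick the access-pattern kernel under test; the commented-out
  // kernel_reverse below is the alternative pattern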
/* void (*kernel) (int*, int*, int, int, int) = &kernel_reverse; */
hipLaunchKernelGGL(( (kernel)), dim3(blocks), dim3(threads), 0, stream_app, device_array_a, device_array_b,
iterations, jump, stride);
pthread_mutex_unlock(mutexapp);
cutilSafeCall(hipStreamSynchronize(stream_app));
hipMemcpyAsync(host_array_a, device_array_a, size*sizeof(int),
hipMemcpyDeviceToHost, stream_app);
hipFree(device_array_a);
hipFree(device_array_b);
  return 0; // main_micro is declared int, so return a status explicitly
}
| 8b7b7ba7a13f151f119e5300f504182873601f62.cu | #include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <vector>
#include <fstream>
#include <assert.h>
#include "../benchmark_common.h"
#include "kernel.h"
void read_parameters(char* filename, int* iterations, int* jump, int* stride,
int* blocks, int* threads)
{
std::vector<std::string> vecOfStrs;
std::ifstream in(filename);
std::string str;
while (std::getline(in, str))
{
if(str.size() > 0)
vecOfStrs.push_back(str);
}
assert(vecOfStrs.size() >= 5);
*iterations = std::atoi(vecOfStrs[0].c_str());
*jump = std::atoi(vecOfStrs[1].c_str());
*stride = std::atoi(vecOfStrs[2].c_str());
*blocks = std::atoi(vecOfStrs[3].c_str());
*threads = std::atoi(vecOfStrs[4].c_str());
}
int main_micro(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag)
{
int iterations, jump, stride, blocks, threads;
read_parameters("MICRO/parameters", &iterations, &jump, &stride, &blocks,
&threads);
printf("%d\t%d\t%d\t%d\t%d\n", iterations, jump, stride, blocks, threads);
int size = stride * jump * blocks * threads;
int *host_array_a, *host_array_b;
int *device_array_a, *device_array_b;
host_array_a = (int*) malloc(sizeof(int) * size);
host_array_b = (int*) malloc(sizeof(int) * size);
cudaMalloc((void **)&device_array_a, size*sizeof(int));
cudaMalloc((void **)&device_array_b, size*sizeof(int));
cudaMemcpyAsync(device_array_a, host_array_a, size*sizeof(int),
cudaMemcpyHostToDevice, stream_app);
void (*kernel) (int*, int*, int, int, int) = &kernel_strided;
/* void (*kernel) (int*, int*, int, int, int) = &kernel_reverse; */
(kernel)<<<blocks, threads, 0, stream_app>>>(device_array_a, device_array_b,
iterations, jump, stride);
pthread_mutex_unlock(mutexapp);
cutilSafeCall(cudaStreamSynchronize(stream_app));
cudaMemcpyAsync(host_array_a, device_array_a, size*sizeof(int),
cudaMemcpyDeviceToHost, stream_app);
cudaFree(device_array_a);
cudaFree(device_array_b);
  return 0; // main_micro is declared int, so return a status explicitly
}
|
9c129d44495a41e3e33c05c2ddfa346183c728b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CellManager.cu
*
* Created on: 2016/07/07
* Author: yasu7890v
*/
#include "define.h"
#include "CellManager.h"
#include "utils.h"
#include <fstream>
#include <vector>
#include <cassert>
#include <string>
#include <cfloat>
#include <algorithm>
__global__ void genrand_init(hiprandState_t* csarr){
#define RNG_SEED 1
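	// one RNG state per slot; seeding with (RNG_SEED << 20) + id gives
	// every state a distinct seed derived from the global RNG_SEED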
int id = blockDim.x*blockIdx.x + threadIdx.x;
if (id < RNG_STATE_NUM){
hiprand_init((RNG_SEED << 20) + id, id, 0, &csarr[id]);
}
}
__device__ void CellConnectionData::add_atomic(CellIndex idx){
connect_index[atomicAdd(&connect_num,1)]=idx;
}
__device__ CellConnectionData::CellConnectionData():connect_num(0), gj_switch(0){
std::fill(&gj_alloc[0], &gj_alloc[2 * MAX_CONNECT_CELL_NUM], gj_init);
}
#define DCM(ptr,elem_size) hipMalloc((void**)&(ptr),elem_size*MAX_CELL_NUM)
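// DCM allocates a MAX_CELL_NUM-element device array for one per-cell
// attribute, i.e. the cell data is laid out as a structure of arrays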
void CellManager_Device::__alloc(){
hipMalloc((void**)&__block_max_store, sizeof(int)*DEDUCT_ZMAX_DIV_NUM);
DCM(state, sizeof(CELL_STATE));
DCM(pos[0], sizeof(CellPos));
DCM(pos[1], sizeof(CellPos));
DCM(fix_origin, sizeof(CellIndex));
DCM(connection_data, sizeof(CellConnectionData));
DCM(pair_index, sizeof(CellIndex));
DCM(ca2p[0], sizeof(real));
DCM(ca2p[1], sizeof(real));
DCM(ca2p_avg, sizeof(real));
DCM(ex_inert, sizeof(real));
DCM(IP3[0], sizeof(real));
DCM(IP3[1], sizeof(real));
DCM(agek, sizeof(real));
DCM(ageb, sizeof(real));
DCM(ex_fat, sizeof(real));
DCM(in_fat, sizeof(real));
DCM(spr_nat_len, sizeof(real));
DCM(rest_div_times, sizeof(int));
DCM(dermis_index, sizeof(CellIndex));
DCM(uid, sizeof(int));
hipMalloc((void**)&zmax, sizeof(real));
hipMalloc((void**)&ncell, sizeof(int));
hipMalloc((void**)&nder, sizeof(int));
hipMalloc((void**)&nmemb, sizeof(int));
hipMalloc((void**)¤t_phase, sizeof(int));
hipMalloc((void**)&sw, sizeof(int));
hipMalloc((void**)&next_uid, sizeof(int));
hipMalloc((void**)&need_reconnect, sizeof(int));
hipMalloc((void**)&remove_queue, sizeof(LockfreeDeviceQueue<CellIndex, MAX_CELL_NUM>));
hipMemset(zmax, 0, sizeof(real));
hipMemset(ncell, 0, sizeof(int));
hipMemset(nder, 0, sizeof(int));
hipMemset(nmemb, 0, sizeof(int));
hipMemset(current_phase, 0, sizeof(int));
hipMemset(sw, 0, sizeof(int));
hipMemset(next_uid, 0, sizeof(int));
hipMemset(need_reconnect, 0, sizeof(int));
LockfreeDeviceQueue<CellIndex, MAX_CELL_NUM> rmq;
hipMemcpy(remove_queue, &rmq, sizeof(LockfreeDeviceQueue<CellIndex, MAX_CELL_NUM>), hipMemcpyHostToDevice);
}
__device__ void CellManager_Device::set_need_reconnect(int need){
atomicExch(need_reconnect, need);
}
__device__ CellDeviceWrapper CellManager_Device::alloc_new_cell(){
set_need_reconnect(NEED_RECONNECT);
CellDeviceWrapper tmp(this,atomicAdd(ncell, 1));
tmp.uid() = atomicAdd(next_uid, 1);
return tmp;
}
__device__ void CellManager_Device::migrate(CellIndex src,CellIndex dest){
#define MIG(elem) elem[dest]=elem[src]
MIG(state);
MIG(pos[0]);
MIG(pos[1]);
MIG(fix_origin);
MIG(connection_data); //slow? invalidated
MIG(pair_index);
MIG(ca2p[0]);
MIG(ca2p[1]);
MIG(ca2p_avg);
MIG(ex_inert);
MIG(IP3[0]);
MIG(IP3[1]);
MIG(agek);
MIG(ageb);
MIG(ex_fat);
MIG(in_fat);
MIG(spr_nat_len);
MIG(rest_div_times);
MIG(dermis_index);
MIG(uid);
#undef MIG
if (pair_index[dest] >= 0){
pair_index[pair_index[dest]] = dest;
}
set_need_reconnect(NEED_RECONNECT);
}
void CellManager::alloc(){
CellManager_Device tmp;
tmp.__alloc();
state=tmp.state;
pos[0]=tmp.pos[0];
pos[1]=tmp.pos[1];
fix_origin=tmp.fix_origin;
connection_data=tmp.connection_data;
pair_index=tmp.pair_index;
ca2p[0]=tmp.ca2p[0];
ca2p[1] = tmp.ca2p[1];
ca2p_avg=tmp.ca2p_avg;
ex_inert=tmp.ex_inert;
IP3[0]=tmp.IP3[0];
IP3[1] = tmp.IP3[1];
agek=tmp.agek;
ageb=tmp.ageb;
ex_fat=tmp.ex_fat;
in_fat=tmp.in_fat;
spr_nat_len=tmp.spr_nat_len;
rest_div_times=tmp.rest_div_times;
dermis_index=tmp.dermis_index;
uid = tmp.uid;
zmax=tmp.zmax;
ncell=tmp.ncell;
nder=tmp.nder;
nmemb=tmp.nmemb;
current_phase=tmp.current_phase;
sw=tmp.sw;
next_uid = tmp.next_uid;
need_reconnect = tmp.need_reconnect;
dev_remove_queue_size = reinterpret_cast<unsigned int*>(tmp.remove_queue); //head address points to size (due to Standard Layout)
hipMalloc((void**)&dev_ptr,sizeof(CellManager_Device));
hipMemcpy(dev_ptr,&tmp,sizeof(CellManager_Device),hipMemcpyHostToDevice);
hipMalloc((void**)&rng_state, sizeof(hiprandState_t)*RNG_STATE_NUM);
genrand_init << <RNG_STATE_NUM / 256 + 1, 256 >> >(rng_state);
}
void CellManager::dealloc(){
//CellManager_Device* ptr=new CellManager_Device();
//hipMemcpy(ptr,dev_ptr,sizeof(CellManager_Device),hipMemcpyDeviceToHost);
hipFree(__block_max_store);
hipFree(state);
hipFree(fix_origin);
hipFree(connection_data);
hipFree(pair_index);
hipFree(pos[0]);
hipFree(pos[1]);
hipFree(ca2p[0]);
hipFree(ca2p[1]);
hipFree(ca2p_avg);
hipFree(ex_inert);
hipFree(IP3[0]);
hipFree(IP3[1]);
hipFree(agek);
hipFree(ageb);
hipFree(ex_fat);
hipFree(in_fat);
hipFree(spr_nat_len);
hipFree(rest_div_times);
hipFree(dermis_index);
hipFree(uid);
hipFree(zmax);
hipFree(ncell);
hipFree(nder);
hipFree(nmemb);
hipFree(current_phase);
hipFree(sw);
hipFree(next_uid);
hipFree(dev_ptr);
hipFree(rng_state);
//delete ptr;
}
__host__ void CellManager::memb_init(int _nmemb,CellConnectionData cs[]){
for(int j=0;j<_nmemb;j++){
const size_t jj=j%NMX;
const size_t kk = j/NMX;
cs[j].connect_index[0]=get_adj_memb_idx<DIR_U>(j);
cs[j].connect_index[1]=get_adj_memb_idx<DIR_L>(j);
cs[j].connect_index[2]=get_adj_memb_idx<DIR_B>(j);
cs[j].connect_index[3]=get_adj_memb_idx<DIR_R>(j);
assert(cs[j].connect_index[0] >= 0 && cs[j].connect_index[1] >= 0 && cs[j].connect_index[2] >= 0 && cs[j].connect_index[3] >= 0);
}
//2-pass
for (int j = 0; j < _nmemb; j++){
const int memb_u = cs[j].connect_index[0];
const int memb_l = cs[j].connect_index[1];
const int memb_b = cs[j].connect_index[2];
const int memb_r = cs[j].connect_index[3];
//auto& memb_lu = cs[j].index[4];
//auto& memb_bl = cs[j].index[5];
//auto& memb_rb = cs[j].index[6];
//auto& memb_ur = cs[j].index[7];
cs[j].connect_index[4] = cs[memb_l].connect_index[0];
cs[j].connect_index[5] = cs[memb_b].connect_index[1];
cs[j].connect_index[6] = cs[memb_r].connect_index[2];
cs[j].connect_index[7] = cs[memb_u].connect_index[3];
cs[j].connect_num = 8;
assert(cs[j].connect_index[4] >= 0 && cs[j].connect_index[5] >= 0 && cs[j].connect_index[6] >= 0 && cs[j].connect_index[7] >= 0);
}
}
__host__ void CellManager::init_with_file(const char* filename,bool resume){
std::ifstream dstrm(filename);
if(!dstrm){
printf("load failed\n");
assert(dstrm);
}
using namespace std;
vector<CELL_STATE> hstate(MAX_CELL_NUM);
vector<CellPos> hpos(MAX_CELL_NUM);
vector<CellIndex> hfix_origin(MAX_CELL_NUM);
vector<CellConnectionData> hconnection_data(MAX_CELL_NUM);
vector<CellIndex> hpair_index(MAX_CELL_NUM);
vector<real> hca2p(MAX_CELL_NUM);
vector<real>hca2p_avg(MAX_CELL_NUM);
vector<real>hex_inert(MAX_CELL_NUM);
vector<real>hagek(MAX_CELL_NUM);
vector<real>hageb(MAX_CELL_NUM);
vector<real>hex_fat(MAX_CELL_NUM);
vector<real>hin_fat(MAX_CELL_NUM);
vector<real>hspr_nat_len(MAX_CELL_NUM);
vector<real>hrad(MAX_CELL_NUM);
vector<int> hrest_div_times(MAX_CELL_NUM);
vector<int> huid(MAX_CELL_NUM);
vector<real> hIP3(MAX_CELL_NUM);
string line;
int id_count=0;
unsigned int phase=0;
int nmemb=0;
int nder=0;
while (std::getline(dstrm, line)) {
sscanf(line.c_str(), "%*d %d " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " %d " R_FMT " " R_FMT " %*d " R_FMT " %d %d",
&hstate[id_count],
&hrad[id_count],
&hageb[id_count],
&hagek[id_count],
&hca2p[id_count],
&hpos[id_count].x,
&hpos[id_count].y,
&hpos[id_count].z,
&hca2p_avg[id_count],
&hrest_div_times[id_count],
&hex_fat[id_count],
&hin_fat[id_count],
&hspr_nat_len[id_count],
&hpair_index[id_count],
&hfix_origin[id_count]);
hpos[id_count].w = 0.0f;
		/* stop reading once a BLANK entry is reached */
CELL_STATE state = hstate[id_count];
real rad = hrad[id_count];
if (state == BLANK)break;
/*
validation
*/
if (SYSTEM == BASAL && (state == ALIVE || state == DEAD || state == AIR)) {
			printf(" input data must not contain ALIVE or DEAD in case of BASAL\n");
exit(1);
}
if (state == DER && rad != R_der) {
printf("radii of DER not consistent with param.h %lf %lf\n",rad,R_der);
exit(1);
}
if (state == MEMB && rad != R_memb) {
			printf("radii of MEMB not consistent with param.h %lf %lf\n", rad, R_memb);
exit(1);
}
if (phase == 0 && state != MEMB) {
assert(state == DER);
phase++;
}
if (phase == 1 && state != DER) {
//assert(state == DER);
phase++;
}
if (phase > 0 && state == MEMB) {
printf("non phase0 memb\n");
exit(1);
}
if (phase > 1 && state == DER) {
printf("non phase1 der\n");
exit(1);
}
if (state == FIX){
hrest_div_times[id_count] = DIV_MAX;
printf("FIX\n");
}
if (state == MEMB)nmemb++;
if (state == DER)nder++;
//*(unsigned int*)(&c_pos_h[id_count].w) = c_state_h[id_count];
		huid[id_count] = id_count; // write in place; push_back would append past the presized vector and never reach the device copy
printf("Phase %d Cell loaded:%d\n", phase, id_count++);
}
assert(nmemb==NMX*NMY);
printf("eusyo0\n");
set_cell_nums(id_count,nmemb,nder);
printf("eusyo1\n");
memb_init(nmemb, &hconnection_data[0]);
if (!resume){
std::fill(hca2p.begin(), hca2p.end(), ca2p_init);
std::fill(hca2p_avg.begin(), hca2p_avg.end(), ca2p_init);
std::fill(hex_inert.begin(), hex_inert.end(), ex_inert_init);
std::fill(hIP3.begin(), hIP3.end(), IP3_init);
for (int i = 0; i < id_count; i++){
if (hstate[i] == DEAD){
hca2p[i] = CDEF(0.0);
hca2p_avg[i] = CDEF(0.0);
}
}
}
hipMemcpy(state, &hstate[0], sizeof(CELL_STATE)*MAX_CELL_NUM, hipMemcpyHostToDevice);
//hipMemcpy(d->c_radius_d, c_radius_h, sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(ageb, &hageb[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(agek, &hagek[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(ca2p[0], &hca2p[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(ca2p[1], &hca2p[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(pos[0], &hpos[0], sizeof(CellPos)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(pos[1], &hpos[0], sizeof(CellPos)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(ca2p_avg, &hca2p_avg[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(rest_div_times, &hrest_div_times[0], sizeof(int)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(ex_fat, &hex_fat[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(in_fat, &hin_fat[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(spr_nat_len, &hspr_nat_len[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(pair_index, &hpair_index[0], sizeof(int)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(fix_origin, &hfix_origin[0], sizeof(int)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(connection_data, &hconnection_data[0], sizeof(CellConnectionData)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(uid, &huid[0], sizeof(int)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(next_uid, &id_count, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(IP3[0], &hIP3[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
hipMemcpy(IP3[1], &hIP3[0], sizeof(real)*MAX_CELL_NUM, hipMemcpyHostToDevice);
}
__host__ void CellManager::switch_phase(){
current_phase_host = 1 - current_phase_host;
hipMemcpy(current_phase, ¤t_phase_host, sizeof(int), hipMemcpyHostToDevice);
}
__host__ void CellManager::fetch_cell_nums(){
hipMemcpy(&ncell_host,ncell,sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(&nder_host,nder,sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(&nmemb_host,nmemb,sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(&sw_host, sw, sizeof(int), hipMemcpyDeviceToHost);
}
bool CellManager::should_force_reconnect(){
int tmp;
hipMemcpy(&tmp, need_reconnect, sizeof(int), hipMemcpyDeviceToHost);
return ( tmp == NEED_RECONNECT);
}
void CellManager::no_need_reconnect(){
int tmp = NO_NEED_RECONNECT;
hipMemcpy(need_reconnect, &tmp, sizeof(int), hipMemcpyHostToDevice);
}
unsigned int CellManager::get_device_remove_queue_size(){
unsigned int tmp;
hipMemcpy(&tmp, dev_remove_queue_size, sizeof(unsigned int), hipMemcpyDeviceToHost);
return tmp;
}
void CellManager::reset_device_remove_queue(){
//unsigned int tmp;
hipMemset(dev_remove_queue_size,0, sizeof(unsigned int));
//return tmp;
}
__host__ void CellManager::set_cell_nums(int _ncell,int _nmemb,int _nder){
ncell_host=_ncell;
nmemb_host=_nmemb;
nder_host = _nder;
printf("eruusi2\n");
hipMemcpy(ncell,&ncell_host,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(nder,&nder_host,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(nmemb,&nmemb_host,sizeof(int),hipMemcpyHostToDevice);
}
CellPos* CellManager::current_pos_host(){
return pos[current_phase_host];
}
CellPos* CellManager::next_pos_host(){
return pos[1-current_phase_host];
}
//use fixed blocks
//remain float
__device__ float atomicMaxf(float* address, float val)
{
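	// emulate an atomic max for float: retry a compare-and-swap on the
	// int bit pattern until no other thread has stored a larger value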
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
while (val > __int_as_float(old)) {
assumed = old;
old = atomicCAS(address_as_int, assumed,
__float_as_int(val));
}
return __int_as_float(old);
}
//remain float
__global__ void get_cell_zmax_impl(int ncell,int offset, const CellPos* cpos,const CELL_STATE* cstate, float* out_zmax){
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = (blockDim.x * blockIdx.x) + tid+offset;
shared[tid] = -FLT_MAX;
//collect out-ranged datas
while (gid < ncell) {
const CELL_STATE st = cstate[gid];
if (st == DEAD || st == ALIVE || st == MUSUME || st == FIX){
shared[tid] = fmaxf(shared[tid], (float)cpos[gid].z);
}
gid += gridDim.x*blockDim.x;
}
__syncthreads();
	gid = (blockDim.x * blockIdx.x) + tid; // recompute the first-pass gid (without offset); only used in the bounds guard below
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1)
{
if (tid < s && gid < ncell)
shared[tid] = fmaxf(shared[tid], shared[tid + s]);
__syncthreads();
}
if (tid == 0)atomicMaxf(out_zmax, shared[0]);
}
real get_cell_zmax(CellManager*cm){
float initf = -FLT_MAX;
float* zmax;
const int offset = cm->nmemb_host + cm->nder_host;
hipMalloc(&zmax, sizeof(float));
hipMemcpy(zmax, &initf, sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( get_cell_zmax_impl), dim3((cm->ncell_host-offset-1)/256+1),dim3(256),256*sizeof(float), 0, cm->ncell_host,offset, cm->current_pos_host(),cm->state, zmax);
hipMemcpy(&initf, zmax, sizeof(float), hipMemcpyDeviceToHost);
	hipDeviceSynchronize();
	hipFree(zmax); // free the temporary device scalar instead of leaking it on every call
	return (real)initf;
} | 9c129d44495a41e3e33c05c2ddfa346183c728b9.cu | /*
* CellManager.cu
*
* Created on: 2016/07/07
* Author: yasu7890v
*/
#include "define.h"
#include "CellManager.h"
#include "utils.h"
#include <fstream>
#include <vector>
#include <cassert>
#include <string>
#include <cfloat>
#include <algorithm>
__global__ void genrand_init(curandState* csarr){
#define RNG_SEED 1
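	// one RNG state per slot; seeding with (RNG_SEED << 20) + id gives
	// every state a distinct seed derived from the global RNG_SEED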
int id = blockDim.x*blockIdx.x + threadIdx.x;
if (id < RNG_STATE_NUM){
curand_init((RNG_SEED << 20) + id, id, 0, &csarr[id]);
}
}
__device__ void CellConnectionData::add_atomic(CellIndex idx){
connect_index[atomicAdd(&connect_num,1)]=idx;
}
__device__ CellConnectionData::CellConnectionData():connect_num(0), gj_switch(0){
std::fill(&gj_alloc[0], &gj_alloc[2 * MAX_CONNECT_CELL_NUM], gj_init);
}
#define DCM(ptr,elem_size) cudaMalloc((void**)&(ptr),elem_size*MAX_CELL_NUM)
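// DCM allocates a MAX_CELL_NUM-element device array for one per-cell
// attribute, i.e. the cell data is laid out as a structure of arrays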
void CellManager_Device::__alloc(){
cudaMalloc((void**)&__block_max_store, sizeof(int)*DEDUCT_ZMAX_DIV_NUM);
DCM(state, sizeof(CELL_STATE));
DCM(pos[0], sizeof(CellPos));
DCM(pos[1], sizeof(CellPos));
DCM(fix_origin, sizeof(CellIndex));
DCM(connection_data, sizeof(CellConnectionData));
DCM(pair_index, sizeof(CellIndex));
DCM(ca2p[0], sizeof(real));
DCM(ca2p[1], sizeof(real));
DCM(ca2p_avg, sizeof(real));
DCM(ex_inert, sizeof(real));
DCM(IP3[0], sizeof(real));
DCM(IP3[1], sizeof(real));
DCM(agek, sizeof(real));
DCM(ageb, sizeof(real));
DCM(ex_fat, sizeof(real));
DCM(in_fat, sizeof(real));
DCM(spr_nat_len, sizeof(real));
DCM(rest_div_times, sizeof(int));
DCM(dermis_index, sizeof(CellIndex));
DCM(uid, sizeof(int));
cudaMalloc((void**)&zmax, sizeof(real));
cudaMalloc((void**)&ncell, sizeof(int));
cudaMalloc((void**)&nder, sizeof(int));
cudaMalloc((void**)&nmemb, sizeof(int));
cudaMalloc((void**)¤t_phase, sizeof(int));
cudaMalloc((void**)&sw, sizeof(int));
cudaMalloc((void**)&next_uid, sizeof(int));
cudaMalloc((void**)&need_reconnect, sizeof(int));
cudaMalloc((void**)&remove_queue, sizeof(LockfreeDeviceQueue<CellIndex, MAX_CELL_NUM>));
cudaMemset(zmax, 0, sizeof(real));
cudaMemset(ncell, 0, sizeof(int));
cudaMemset(nder, 0, sizeof(int));
cudaMemset(nmemb, 0, sizeof(int));
cudaMemset(current_phase, 0, sizeof(int));
cudaMemset(sw, 0, sizeof(int));
cudaMemset(next_uid, 0, sizeof(int));
cudaMemset(need_reconnect, 0, sizeof(int));
LockfreeDeviceQueue<CellIndex, MAX_CELL_NUM> rmq;
cudaMemcpy(remove_queue, &rmq, sizeof(LockfreeDeviceQueue<CellIndex, MAX_CELL_NUM>), cudaMemcpyHostToDevice);
}
__device__ void CellManager_Device::set_need_reconnect(int need){
atomicExch(need_reconnect, need);
}
__device__ CellDeviceWrapper CellManager_Device::alloc_new_cell(){
set_need_reconnect(NEED_RECONNECT);
CellDeviceWrapper tmp(this,atomicAdd(ncell, 1));
tmp.uid() = atomicAdd(next_uid, 1);
return tmp;
}
__device__ void CellManager_Device::migrate(CellIndex src,CellIndex dest){
#define MIG(elem) elem[dest]=elem[src]
MIG(state);
MIG(pos[0]);
MIG(pos[1]);
MIG(fix_origin);
MIG(connection_data); //slow? invalidated
MIG(pair_index);
MIG(ca2p[0]);
MIG(ca2p[1]);
MIG(ca2p_avg);
MIG(ex_inert);
MIG(IP3[0]);
MIG(IP3[1]);
MIG(agek);
MIG(ageb);
MIG(ex_fat);
MIG(in_fat);
MIG(spr_nat_len);
MIG(rest_div_times);
MIG(dermis_index);
MIG(uid);
#undef MIG
if (pair_index[dest] >= 0){
pair_index[pair_index[dest]] = dest;
}
set_need_reconnect(NEED_RECONNECT);
}
void CellManager::alloc(){
CellManager_Device tmp;
tmp.__alloc();
state=tmp.state;
pos[0]=tmp.pos[0];
pos[1]=tmp.pos[1];
fix_origin=tmp.fix_origin;
connection_data=tmp.connection_data;
pair_index=tmp.pair_index;
ca2p[0]=tmp.ca2p[0];
ca2p[1] = tmp.ca2p[1];
ca2p_avg=tmp.ca2p_avg;
ex_inert=tmp.ex_inert;
IP3[0]=tmp.IP3[0];
IP3[1] = tmp.IP3[1];
agek=tmp.agek;
ageb=tmp.ageb;
ex_fat=tmp.ex_fat;
in_fat=tmp.in_fat;
spr_nat_len=tmp.spr_nat_len;
rest_div_times=tmp.rest_div_times;
dermis_index=tmp.dermis_index;
uid = tmp.uid;
zmax=tmp.zmax;
ncell=tmp.ncell;
nder=tmp.nder;
nmemb=tmp.nmemb;
current_phase=tmp.current_phase;
sw=tmp.sw;
next_uid = tmp.next_uid;
need_reconnect = tmp.need_reconnect;
dev_remove_queue_size = reinterpret_cast<unsigned int*>(tmp.remove_queue); //head address points to size (due to Standard Layout)
cudaMalloc((void**)&dev_ptr,sizeof(CellManager_Device));
cudaMemcpy(dev_ptr,&tmp,sizeof(CellManager_Device),cudaMemcpyHostToDevice);
cudaMalloc((void**)&rng_state, sizeof(curandState)*RNG_STATE_NUM);
genrand_init << <RNG_STATE_NUM / 256 + 1, 256 >> >(rng_state);
}
void CellManager::dealloc(){
//CellManager_Device* ptr=new CellManager_Device();
//cudaMemcpy(ptr,dev_ptr,sizeof(CellManager_Device),cudaMemcpyDeviceToHost);
cudaFree(__block_max_store);
cudaFree(state);
cudaFree(fix_origin);
cudaFree(connection_data);
cudaFree(pair_index);
cudaFree(pos[0]);
cudaFree(pos[1]);
cudaFree(ca2p[0]);
cudaFree(ca2p[1]);
cudaFree(ca2p_avg);
cudaFree(ex_inert);
cudaFree(IP3[0]);
cudaFree(IP3[1]);
cudaFree(agek);
cudaFree(ageb);
cudaFree(ex_fat);
cudaFree(in_fat);
cudaFree(spr_nat_len);
cudaFree(rest_div_times);
cudaFree(dermis_index);
cudaFree(uid);
cudaFree(zmax);
cudaFree(ncell);
cudaFree(nder);
cudaFree(nmemb);
cudaFree(current_phase);
cudaFree(sw);
cudaFree(next_uid);
cudaFree(dev_ptr);
cudaFree(rng_state);
//delete ptr;
}
__host__ void CellManager::memb_init(int _nmemb,CellConnectionData cs[]){
for(int j=0;j<_nmemb;j++){
const size_t jj=j%NMX;
const size_t kk = j/NMX;
cs[j].connect_index[0]=get_adj_memb_idx<DIR_U>(j);
cs[j].connect_index[1]=get_adj_memb_idx<DIR_L>(j);
cs[j].connect_index[2]=get_adj_memb_idx<DIR_B>(j);
cs[j].connect_index[3]=get_adj_memb_idx<DIR_R>(j);
assert(cs[j].connect_index[0] >= 0 && cs[j].connect_index[1] >= 0 && cs[j].connect_index[2] >= 0 && cs[j].connect_index[3] >= 0);
}
//second pass: fill the diagonal neighbours from the axis neighbours set above
for (int j = 0; j < _nmemb; j++){
const int memb_u = cs[j].connect_index[0];
const int memb_l = cs[j].connect_index[1];
const int memb_b = cs[j].connect_index[2];
const int memb_r = cs[j].connect_index[3];
//auto& memb_lu = cs[j].index[4];
//auto& memb_bl = cs[j].index[5];
//auto& memb_rb = cs[j].index[6];
//auto& memb_ur = cs[j].index[7];
cs[j].connect_index[4] = cs[memb_l].connect_index[0];
cs[j].connect_index[5] = cs[memb_b].connect_index[1];
cs[j].connect_index[6] = cs[memb_r].connect_index[2];
cs[j].connect_index[7] = cs[memb_u].connect_index[3];
cs[j].connect_num = 8;
assert(cs[j].connect_index[4] >= 0 && cs[j].connect_index[5] >= 0 && cs[j].connect_index[6] >= 0 && cs[j].connect_index[7] >= 0);
}
}
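/*
For intuition: the second pass above derives every diagonal neighbour purely from the
axis neighbours filled in by the first pass (the wrapping convention lives in
get_adj_memb_idx, which is defined elsewhere and assumed here). Symbolically, for each
membrane cell j:

    upper-left(j)   = up(left(j))     ->  cs[j].connect_index[4] = cs[memb_l].connect_index[0]
    bottom-left(j)  = left(below(j))  ->  cs[j].connect_index[5] = cs[memb_b].connect_index[1]
    bottom-right(j) = below(right(j)) ->  cs[j].connect_index[6] = cs[memb_r].connect_index[2]
    upper-right(j)  = right(up(j))    ->  cs[j].connect_index[7] = cs[memb_u].connect_index[3]

This is why the first pass must complete for all cells before the second pass runs, and
why every membrane cell ends up with connect_num == 8.
*/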
__host__ void CellManager::init_with_file(const char* filename,bool resume){
std::ifstream dstrm(filename);
if(!dstrm){
printf("load failed\n");
assert(dstrm);
}
using namespace std;
vector<CELL_STATE> hstate(MAX_CELL_NUM);
vector<CellPos> hpos(MAX_CELL_NUM);
vector<CellIndex> hfix_origin(MAX_CELL_NUM);
vector<CellConnectionData> hconnection_data(MAX_CELL_NUM);
vector<CellIndex> hpair_index(MAX_CELL_NUM);
vector<real> hca2p(MAX_CELL_NUM);
vector<real>hca2p_avg(MAX_CELL_NUM);
vector<real>hex_inert(MAX_CELL_NUM);
vector<real>hagek(MAX_CELL_NUM);
vector<real>hageb(MAX_CELL_NUM);
vector<real>hex_fat(MAX_CELL_NUM);
vector<real>hin_fat(MAX_CELL_NUM);
vector<real>hspr_nat_len(MAX_CELL_NUM);
vector<real>hrad(MAX_CELL_NUM);
vector<int> hrest_div_times(MAX_CELL_NUM);
vector<int> huid(MAX_CELL_NUM);
vector<real> hIP3(MAX_CELL_NUM);
string line;
int id_count=0;
unsigned int phase=0;
int nmemb=0;
int nder=0;
while (std::getline(dstrm, line)) {
sscanf(line.c_str(), "%*d %d " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " " R_FMT " %d " R_FMT " " R_FMT " %*d " R_FMT " %d %d",
&hstate[id_count],
&hrad[id_count],
&hageb[id_count],
&hagek[id_count],
&hca2p[id_count],
&hpos[id_count].x,
&hpos[id_count].y,
&hpos[id_count].z,
&hca2p_avg[id_count],
&hrest_div_times[id_count],
&hex_fat[id_count],
&hin_fat[id_count],
&hspr_nat_len[id_count],
&hpair_index[id_count],
&hfix_origin[id_count]);
hpos[id_count].w = 0.0f;
/*
Stop loading once a BLANK entry is reached.
*/
CELL_STATE state = hstate[id_count];
real rad = hrad[id_count];
if (state == BLANK)break;
/*
validation
*/
if (SYSTEM == BASAL && (state == ALIVE || state == DEAD || state == AIR)) {
printf(" input date must not contain ALIVE or DEAD in case of BASAL\n");
exit(1);
}
if (state == DER && rad != R_der) {
printf("radii of DER not consistent with param.h %lf %lf\n",rad,R_der);
exit(1);
}
if (state == MEMB && rad != R_memb) {
printf("radii of DER not consistent with param.h %lf %lf\n", rad, R_memb);
exit(1);
}
if (phase == 0 && state != MEMB) {
assert(state == DER);
phase++;
}
if (phase == 1 && state != DER) {
//assert(state == DER);
phase++;
}
if (phase > 0 && state == MEMB) {
printf("non phase0 memb\n");
exit(1);
}
if (phase > 1 && state == DER) {
printf("non phase1 der\n");
exit(1);
}
if (state == FIX){
hrest_div_times[id_count] = DIV_MAX;
printf("FIX\n");
}
if (state == MEMB)nmemb++;
if (state == DER)nder++;
//*(unsigned int*)(&c_pos_h[id_count].w) = c_state_h[id_count];
huid[id_count] = id_count; // uid equals the load index; the vector is preallocated, so assign in place
printf("Phase %d Cell loaded:%d\n", phase, id_count++);
}
assert(nmemb==NMX*NMY);
printf("eusyo0\n");
set_cell_nums(id_count,nmemb,nder);
printf("eusyo1\n");
memb_init(nmemb, &hconnection_data[0]);
if (!resume){
std::fill(hca2p.begin(), hca2p.end(), ca2p_init);
std::fill(hca2p_avg.begin(), hca2p_avg.end(), ca2p_init);
std::fill(hex_inert.begin(), hex_inert.end(), ex_inert_init);
std::fill(hIP3.begin(), hIP3.end(), IP3_init);
for (int i = 0; i < id_count; i++){
if (hstate[i] == DEAD){
hca2p[i] = CDEF(0.0);
hca2p_avg[i] = CDEF(0.0);
}
}
}
cudaMemcpy(state, &hstate[0], sizeof(CELL_STATE)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
//cudaMemcpy(d->c_radius_d, c_radius_h, sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(ageb, &hageb[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(agek, &hagek[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(ca2p[0], &hca2p[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(ca2p[1], &hca2p[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(pos[0], &hpos[0], sizeof(CellPos)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(pos[1], &hpos[0], sizeof(CellPos)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(ca2p_avg, &hca2p_avg[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(rest_div_times, &hrest_div_times[0], sizeof(int)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(ex_fat, &hex_fat[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(in_fat, &hin_fat[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(spr_nat_len, &hspr_nat_len[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(pair_index, &hpair_index[0], sizeof(int)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(fix_origin, &hfix_origin[0], sizeof(int)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(connection_data, &hconnection_data[0], sizeof(CellConnectionData)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(uid, &huid[0], sizeof(int)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(next_uid, &id_count, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(IP3[0], &hIP3[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(IP3[1], &hIP3[0], sizeof(real)*MAX_CELL_NUM, cudaMemcpyHostToDevice);
}
__host__ void CellManager::switch_phase(){
current_phase_host = 1 - current_phase_host;
cudaMemcpy(current_phase, ¤t_phase_host, sizeof(int), cudaMemcpyHostToDevice);
}
__host__ void CellManager::fetch_cell_nums(){
cudaMemcpy(&ncell_host,ncell,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(&nder_host,nder,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(&nmemb_host,nmemb,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(&sw_host, sw, sizeof(int), cudaMemcpyDeviceToHost);
}
bool CellManager::should_force_reconnect(){
int tmp;
cudaMemcpy(&tmp, need_reconnect, sizeof(int), cudaMemcpyDeviceToHost);
return ( tmp == NEED_RECONNECT);
}
void CellManager::no_need_reconnect(){
int tmp = NO_NEED_RECONNECT;
cudaMemcpy(need_reconnect, &tmp, sizeof(int), cudaMemcpyHostToDevice);
}
unsigned int CellManager::get_device_remove_queue_size(){
unsigned int tmp;
cudaMemcpy(&tmp, dev_remove_queue_size, sizeof(unsigned int), cudaMemcpyDeviceToHost);
return tmp;
}
void CellManager::reset_device_remove_queue(){
//unsigned int tmp;
cudaMemset(dev_remove_queue_size,0, sizeof(unsigned int));
//return tmp;
}
__host__ void CellManager::set_cell_nums(int _ncell,int _nmemb,int _nder){
ncell_host=_ncell;
nmemb_host=_nmemb;
nder_host = _nder;
printf("eruusi2\n");
cudaMemcpy(ncell,&ncell_host,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(nder,&nder_host,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(nmemb,&nmemb_host,sizeof(int),cudaMemcpyHostToDevice);
}
CellPos* CellManager::current_pos_host(){
return pos[current_phase_host];
}
CellPos* CellManager::next_pos_host(){
return pos[1-current_phase_host];
}
//use fixed blocks
//kept in float (even when real is double)
__device__ float atomicMaxf(float* address, float val)
{
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
while (val > __int_as_float(old)) {
assumed = old;
old = atomicCAS(address_as_int, assumed,
__float_as_int(val));
}
return __int_as_float(old);
}
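/*
atomicMaxf above implements an atomic max for float via compare-and-swap on the int bit
pattern (CUDA offers no native atomicMax for float). The loop only attempts the CAS while
val is still greater than the stored value, so dominated threads drop out without
contending; the comparison itself is done on float values, only the store goes through
the int representation. A minimal usage sketch (illustrative only):

    __global__ void array_max(const float* in, int n, float* out) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) atomicMaxf(out, in[i]);  // *out must be initialised to -FLT_MAX beforehand
    }
*/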
//kept in float (even when real is double)
__global__ void get_cell_zmax_impl(int ncell,int offset, const CellPos* cpos,const CELL_STATE* cstate, float* out_zmax){
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = (blockDim.x * blockIdx.x) + tid+offset;
shared[tid] = -FLT_MAX;
//grid-stride loop: each thread folds every element it is responsible for into its shared-memory slot
while (gid < ncell) {
const CELL_STATE st = cstate[gid];
if (st == DEAD || st == ALIVE || st == MUSUME || st == FIX){
shared[tid] = fmaxf(shared[tid], (float)cpos[gid].z);
}
gid += gridDim.x*blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid; // reset gid without the offset for the bounds check in the reduction below
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1)
{
if (tid < s && gid < ncell)
shared[tid] = fmaxf(shared[tid], shared[tid + s]);
__syncthreads();
}
if (tid == 0)atomicMaxf(out_zmax, shared[0]);
}
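/*
The kernel above is a two-stage maximum: a grid-stride pass folds every qualifying cell
(DEAD / ALIVE / MUSUME / FIX) into one shared-memory slot per thread, a tree reduction in
shared memory then yields the block maximum, and thread 0 of each block merges the block
maxima through atomicMaxf. The dynamic shared-memory size passed at launch must equal
blockDim.x * sizeof(float); the caller below uses 256 threads and 256 * sizeof(float).
*/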
real get_cell_zmax(CellManager*cm){
float initf = -FLT_MAX;
float* zmax;
const int offset = cm->nmemb_host + cm->nder_host;
cudaMalloc(&zmax, sizeof(float));
cudaMemcpy(zmax, &initf, sizeof(float), cudaMemcpyHostToDevice);
get_cell_zmax_impl<<<(cm->ncell_host-offset-1)/256+1,256,256*sizeof(float)>>>(cm->ncell_host,offset, cm->current_pos_host(),cm->state, zmax);
cudaMemcpy(&initf, zmax, sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
return (real)initf;
} |
2dcb16206740eeff1d96a2843978c78f81205c53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<Z*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
Z mIdx(0);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val) {
mIdx = static_cast<Z>(i);
mVal = val;
}
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mIdx;
}
}
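/*
All merge kernels in this file use the same grid-stride pattern: each thread starts at its
global id and advances by gridDim.x * blockDim.x, so the fixed launch of 512 blocks of 512
threads below covers arrays of any length. A minimal standalone sketch of the idiom with
plain dense arrays instead of nd4j shape info (names illustrative only):

    template <typename T>
    __global__ void argmax_over_arrays(const T* const* in, int numArrays, int* out, Nd4jLong length) {
        const auto tid  = blockIdx.x * blockDim.x + threadIdx.x;
        const auto step = gridDim.x * blockDim.x;
        for (Nd4jLong e = tid; e < length; e += step) {
            T   best = in[0][e];
            int idx  = 0;
            for (int i = 1; i < numArrays; i++)
                if (in[i][e] > best) { best = in[i][e]; idx = i; }
            out[e] = idx;  // index of the array holding the maximum at position e
        }
    }
*/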
template <typename T, typename Z>
static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMaxIndex_<T,Z>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
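/*
The wrapper above must copy the array of device pointers itself to the device before the
launch, because the kernel dereferences inArrs[i] on the GPU; PointersManager::replicatePointer
performs that allocate-and-copy step. Without the helper, the same step looks roughly like
this (illustrative sketch, error handling omitted):

    void** dIn = nullptr;
    hipMalloc((void**)&dIn, inBuffers.size() * sizeof(void*));
    hipMemcpy(dIn, inBuffers.data(), inBuffers.size() * sizeof(void*), hipMemcpyHostToDevice);
    // ... launch the kernel with dIn ...
    hipFree(dIn);
*/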
void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val)
mVal = val;
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mVal;
}
}
template<typename T>
static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMax_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum / numArrays;
}
}
template<typename T>
static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAvg_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum;
}
}
template<typename T>
static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAdd_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES);
void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
}
}
} | 2dcb16206740eeff1d96a2843978c78f81205c53.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<Z*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
Z mIdx(0);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val) {
mIdx = static_cast<Z>(i);
mVal = val;
}
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mIdx;
}
}
template <typename T, typename Z>
static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeMaxIndex_<T,Z><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val)
mVal = val;
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mVal;
}
}
template<typename T>
static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeMax_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum / numArrays;
}
}
template<typename T>
static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeAvg_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum;
}
}
template<typename T>
static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeAdd_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES);
void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
}
}
} |
ca419fbe2df5494057a3b4042f12a4cb43090754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/layers/attention_layers/FusedAttentionLayer.h"
namespace fastertransformer {
__global__ void trt_add_QKV_bias(half2* qkv_buf,
const half2* Q,
const half2* bias_Q,
const half2* K,
const half2* bias_K,
const half2* V,
const half2* bias_V,
const int valid_word_num,
const int head_num,
const int size_per_head)
{
// Add bias, and then transpose from
// [3, valid_word_num, head, size] -> [valid_word_num, head, 3, size]
// const int seq_id = blockIdx.x % valid_word_num;
// const int qkv_id = (blockIdx.x - seq_id) / valid_word_num;
const int seq_id = blockIdx.x;
for (int index = threadIdx.x; index < head_num * size_per_head; index += blockDim.x) {
const int size_id = index % size_per_head;
const int head_id = (index - size_id) / size_per_head;
const int target_offset = blockIdx.x * head_num * 3 * size_per_head + head_id * 3 * size_per_head;
const int src_id = seq_id * head_num * size_per_head + index;
qkv_buf[target_offset + 0 * size_per_head + size_id] = Q[src_id] + bias_Q[index];
qkv_buf[target_offset + 1 * size_per_head + size_id] = K[src_id] + bias_K[index];
qkv_buf[target_offset + 2 * size_per_head + size_id] = V[src_id] + bias_V[index];
}
}
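/*
Layout produced above, in half2 units (which is why the caller passes size_per_head_ / 2):
for token t, head h and element s,

    dst[t*head_num*3*size + h*3*size + {0,1,2}*size + s] = {Q,K,V}[t*head_num*size + h*size + s] + bias_{Q,K,V}[h*size + s]

i.e. Q, K and V are interleaved per head so each token owns one contiguous [head, 3, size]
slice, the format the fused multi-head attention kernel consumes. The bias add comes for
free because every element is touched exactly once.
*/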
template<typename T>
void FusedAttentionLayer<T>::invokeTrtAddQkvBias(size_t token_num, const AttentionWeight<T>* attention_weights)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
dim3 grid(token_num);
dim3 block(min((int)(head_num_ * size_per_head_ / 2), 512));
hipLaunchKernelGGL(( trt_add_QKV_bias), dim3(grid), dim3(block), 0, stream_, (half2*)qkv_buf_,
(const half2*)q_buf_,
(const half2*)attention_weights->query_weight.bias,
(const half2*)k_buf_,
(const half2*)attention_weights->key_weight.bias,
(const half2*)v_buf_,
(const half2*)attention_weights->value_weight.bias,
token_num,
head_num_,
size_per_head_ / 2);
}
template<typename T>
void FusedAttentionLayer<T>::forward(TensorMap* output_tensors,
TensorMap* input_tensors,
const AttentionWeight<T>* attention_weights)
{
// input_tensors: [input_query (h_token_num, d_model),
// attention_mask (batch, 1, seqlen, seqlen),
// padding_offset (batch + 1 or batch * 2 + 1)]
// If padding_offset.data is nullptr, padding is not removed
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
const int request_batch_size = input_tensors->at("attention_mask").shape[0];
const int request_seq_len = input_tensors->at("attention_mask").shape[2];
allocateBuffer(request_batch_size, request_seq_len);
T* attention_out = output_tensors->getPtr<T>("hidden_features");
const T* from_tensor = input_tensors->getPtr<T>("input_query");
const T* attention_mask = input_tensors->getPtr<T>("attention_mask");
const int* padding_offset = input_tensors->getPtr<int>("padding_offset");
size_t m_tmp = input_tensors->at("input_query").shape[0];
if (m_tmp % 8 != 0) {
m_tmp = (m_tmp / 8 + 1) * 8;
}
const size_t m = input_tensors->at("input_query").shape[0];
int k = d_model_;
int n = hidden_units_;
#ifdef SPARSITY_ENABLED
const size_t m_padded = m_tmp;
if (sparse_ && cublas_wrapper_->isUseSparse(1, n, m, k)) {
cublas_wrapper_->SpGemm(
HIPBLAS_OP_N, HIPBLAS_OP_N, n, m_padded, k, attention_weights->query_weight.sp_kernel, from_tensor, q_buf_);
cublas_wrapper_->SpGemm(
HIPBLAS_OP_N, HIPBLAS_OP_N, n, m_padded, k, attention_weights->key_weight.sp_kernel, from_tensor, k_buf_);
cublas_wrapper_->SpGemm(
HIPBLAS_OP_N, HIPBLAS_OP_N, n, m_padded, k, attention_weights->value_weight.sp_kernel, from_tensor, v_buf_);
}
else {
#endif
const bool is_batched_QKV_ = cublas_wrapper_->isFuseBatchGemm(3, n, m, k);
if (is_batched_QKV_) {
const T* hA[]{attention_weights->query_weight.kernel,
attention_weights->key_weight.kernel,
attention_weights->value_weight.kernel,
nullptr,
from_tensor,
from_tensor,
from_tensor,
nullptr,
q_buf_,
k_buf_,
v_buf_,
nullptr};
// Note: here we assume the weights may differ on every invocation.
// If the weights could be preprocessed once before inference, the overhead of this
// hipMemcpyAsync could be removed.
check_cuda_error(
hipMemcpyAsync((void*)batch_qkv_kernel_ptr_, hA, sizeof(T*) * 12, hipMemcpyHostToDevice, stream_));
cublas_wrapper_->batchedGemm(HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
(const void* const*)batch_qkv_kernel_ptr_,
n,
(const void* const*)batch_qkv_input_ptr_,
k,
(void* const*)batch_qkv_buf_ptr_,
n,
3);
}
else {
cublas_wrapper_->Gemm(HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
attention_weights->query_weight.kernel,
n,
from_tensor,
k,
q_buf_,
n);
cublas_wrapper_->Gemm(
HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, attention_weights->key_weight.kernel, n, from_tensor, k, k_buf_, n);
cublas_wrapper_->Gemm(HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
attention_weights->value_weight.kernel,
n,
from_tensor,
k,
v_buf_,
n);
}
#ifdef SPARSITY_ENABLED
}
#endif
invokeTrtAddQkvBias(m, attention_weights);
sync_check_cuda_error();
int S = dispatcher_fp16->getSFromMaxSeqLen(request_seq_len);
FT_CHECK(dispatcher_fp16->isValid(S, false));
const int B = input_tensors->at("padding_offset").shape[0] - 1;
dispatcher_fp16->setup(S, B);
dispatcher_fp16->run(qkv_buf_, nullptr, padding_offset, attn_workspace_, qkv_buf_2_, stream_);
sync_check_cuda_error();
k = hidden_units_;
n = d_model_;
#ifdef SPARSITY_ENABLED
if (sparse_ && cublas_wrapper_->isUseSparse(1, n, m, k)) {
cublas_wrapper_->SpGemm(HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m_padded,
k,
attention_weights->attention_output_weight.sp_kernel,
qkv_buf_2_,
attention_out);
}
else {
#endif
cublas_wrapper_->Gemm(HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
attention_weights->attention_output_weight.kernel,
n,
qkv_buf_2_,
k,
attention_out,
n);
#ifdef SPARSITY_ENABLED
}
#endif
if (is_free_buffer_after_forward_ == true) {
freeBuffer();
}
}
template<typename T>
FusedAttentionLayer<T>::FusedAttentionLayer(size_t max_batch_size,
size_t max_seq_len,
size_t head_num,
size_t size_per_head,
size_t d_model,
int sm,
float q_scaling,
hipStream_t stream,
cublasMMWrapper* cublas_wrapper,
IAllocator* allocator,
bool is_free_buffer_after_forward,
bool sparse):
BaseAttentionLayer<T>(stream, cublas_wrapper, allocator, is_free_buffer_after_forward),
head_num_(head_num),
size_per_head_(size_per_head),
d_model_(d_model),
sm_(sm),
q_scaling_(q_scaling),
sparse_(sparse)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
if (((sm_ == kSM_70 || sm_ == kSM_86 || sm_ == kSM_80 || sm_ == kSM_75 || sm_ == kSM_72) && size_per_head_ == 64)
|| ((sm_ == kSM_86 || sm_ == kSM_80 || sm_ == kSM_75) && size_per_head_ == 32)) {
dispatcher_fp16.reset(new FusedMHARunnerFP16v2(head_num_, size_per_head_, sm_, q_scaling_));
}
else {
throw std::runtime_error(std::string("[FT][ERROR] FusedAttentionLayer not support \n"));
}
hidden_units_ = head_num_ * size_per_head_;
}
template<typename T>
FusedAttentionLayer<T>::FusedAttentionLayer(FusedAttentionLayer<T> const& attention_layer):
FusedAttentionLayer(0,
0,
attention_layer.head_num_,
attention_layer.size_per_head_,
attention_layer.d_model_,
attention_layer.sm_,
attention_layer.q_scaling_,
attention_layer.stream_,
attention_layer.cublas_wrapper_,
attention_layer.allocator_,
attention_layer.is_free_buffer_after_forward_,
attention_layer.sparse_)
{
}
template<typename T>
FusedAttentionLayer<T>::~FusedAttentionLayer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
cublas_wrapper_ = nullptr;
freeBuffer();
}
template<typename T>
void FusedAttentionLayer<T>::allocateBuffer()
{
FT_CHECK(false);
}
template<typename T>
void FusedAttentionLayer<T>::allocateBuffer(size_t batch_size, size_t seq_len)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
q_buf_ = (T*)allocator_->reMalloc(q_buf_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
k_buf_ = (T*)allocator_->reMalloc(k_buf_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
v_buf_ = (T*)allocator_->reMalloc(v_buf_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
qkv_buf_ = (T*)allocator_->reMalloc(qkv_buf_, sizeof(T) * 3 * batch_size * seq_len * hidden_units_, false);
qkv_buf_2_ = (T*)allocator_->reMalloc(qkv_buf_2_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
attn_workspace_ = (T*)allocator_->reMalloc(attn_workspace_, dispatcher_fp16->getWorkspaceSize(), false);
batch_qkv_kernel_ptr_ = (T**)allocator_->reMalloc(batch_qkv_kernel_ptr_, sizeof(T*) * 12, false);
batch_qkv_input_ptr_ = batch_qkv_kernel_ptr_ + 4;
batch_qkv_buf_ptr_ = batch_qkv_input_ptr_ + 4;
is_allocate_buffer_ = true;
}
template<typename T>
void FusedAttentionLayer<T>::freeBuffer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
if (is_allocate_buffer_) {
allocator_->free((void**)(&q_buf_));
allocator_->free((void**)(&k_buf_));
allocator_->free((void**)(&v_buf_));
allocator_->free((void**)(&qkv_buf_));
allocator_->free((void**)(&qkv_buf_2_));
allocator_->free((void**)(&attn_workspace_));
allocator_->free((void**)(&batch_qkv_kernel_ptr_));
sync_check_cuda_error();
is_allocate_buffer_ = false;
}
}
template<typename T>
bool FusedAttentionLayer<T>::isValidSeqLen(const size_t seq_len)
{
return true;
}
template class FusedAttentionLayer<float>;
template class FusedAttentionLayer<half>;
#ifdef ENABLE_BF16
template class FusedAttentionLayer<__nv_bfloat16>;
#endif
} // namespace fastertransformer
| ca419fbe2df5494057a3b4042f12a4cb43090754.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/layers/attention_layers/FusedAttentionLayer.h"
namespace fastertransformer {
__global__ void trt_add_QKV_bias(half2* qkv_buf,
const half2* Q,
const half2* bias_Q,
const half2* K,
const half2* bias_K,
const half2* V,
const half2* bias_V,
const int valid_word_num,
const int head_num,
const int size_per_head)
{
// Add bias, and then transpose from
// [3, valid_word_num, head, size] -> [valid_word_num, head, 3, size]
// const int seq_id = blockIdx.x % valid_word_num;
// const int qkv_id = (blockIdx.x - seq_id) / valid_word_num;
const int seq_id = blockIdx.x;
for (int index = threadIdx.x; index < head_num * size_per_head; index += blockDim.x) {
const int size_id = index % size_per_head;
const int head_id = (index - size_id) / size_per_head;
const int target_offset = blockIdx.x * head_num * 3 * size_per_head + head_id * 3 * size_per_head;
const int src_id = seq_id * head_num * size_per_head + index;
qkv_buf[target_offset + 0 * size_per_head + size_id] = Q[src_id] + bias_Q[index];
qkv_buf[target_offset + 1 * size_per_head + size_id] = K[src_id] + bias_K[index];
qkv_buf[target_offset + 2 * size_per_head + size_id] = V[src_id] + bias_V[index];
}
}
template<typename T>
void FusedAttentionLayer<T>::invokeTrtAddQkvBias(size_t token_num, const AttentionWeight<T>* attention_weights)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
dim3 grid(token_num);
dim3 block(min((int)(head_num_ * size_per_head_ / 2), 512));
trt_add_QKV_bias<<<grid, block, 0, stream_>>>((half2*)qkv_buf_,
(const half2*)q_buf_,
(const half2*)attention_weights->query_weight.bias,
(const half2*)k_buf_,
(const half2*)attention_weights->key_weight.bias,
(const half2*)v_buf_,
(const half2*)attention_weights->value_weight.bias,
token_num,
head_num_,
size_per_head_ / 2);
}
template<typename T>
void FusedAttentionLayer<T>::forward(TensorMap* output_tensors,
TensorMap* input_tensors,
const AttentionWeight<T>* attention_weights)
{
// input_tensors: [input_query (h_token_num, d_model),
// attention_mask (batch, 1, seqlen, seqlen),
// padding_offset (batch + 1 or batch * 2 + 1))]
// If padding_offset.data is nullptr, then not remove padding
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
const int request_batch_size = input_tensors->at("attention_mask").shape[0];
const int request_seq_len = input_tensors->at("attention_mask").shape[2];
allocateBuffer(request_batch_size, request_seq_len);
T* attention_out = output_tensors->getPtr<T>("hidden_features");
const T* from_tensor = input_tensors->getPtr<T>("input_query");
const T* attention_mask = input_tensors->getPtr<T>("attention_mask");
const int* padding_offset = input_tensors->getPtr<int>("padding_offset");
size_t m_tmp = input_tensors->at("input_query").shape[0];
if (m_tmp % 8 != 0) {
m_tmp = (m_tmp / 8 + 1) * 8;
}
const size_t m = input_tensors->at("input_query").shape[0];
int k = d_model_;
int n = hidden_units_;
#ifdef SPARSITY_ENABLED
const size_t m_padded = m_tmp;
if (sparse_ && cublas_wrapper_->isUseSparse(1, n, m, k)) {
cublas_wrapper_->SpGemm(
CUBLAS_OP_N, CUBLAS_OP_N, n, m_padded, k, attention_weights->query_weight.sp_kernel, from_tensor, q_buf_);
cublas_wrapper_->SpGemm(
CUBLAS_OP_N, CUBLAS_OP_N, n, m_padded, k, attention_weights->key_weight.sp_kernel, from_tensor, k_buf_);
cublas_wrapper_->SpGemm(
CUBLAS_OP_N, CUBLAS_OP_N, n, m_padded, k, attention_weights->value_weight.sp_kernel, from_tensor, v_buf_);
}
else {
#endif
const bool is_batched_QKV_ = cublas_wrapper_->isFuseBatchGemm(3, n, m, k);
if (is_batched_QKV_) {
const T* hA[]{attention_weights->query_weight.kernel,
attention_weights->key_weight.kernel,
attention_weights->value_weight.kernel,
nullptr,
from_tensor,
from_tensor,
from_tensor,
nullptr,
q_buf_,
k_buf_,
v_buf_,
nullptr};
// Note: here we assume the weights may differ on every invocation.
// If the weights could be preprocessed once before inference, the overhead of this
// cudaMemcpyAsync could be removed.
check_cuda_error(
cudaMemcpyAsync((void*)batch_qkv_kernel_ptr_, hA, sizeof(T*) * 12, cudaMemcpyHostToDevice, stream_));
cublas_wrapper_->batchedGemm(CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
(const void* const*)batch_qkv_kernel_ptr_,
n,
(const void* const*)batch_qkv_input_ptr_,
k,
(void* const*)batch_qkv_buf_ptr_,
n,
3);
}
else {
cublas_wrapper_->Gemm(CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
attention_weights->query_weight.kernel,
n,
from_tensor,
k,
q_buf_,
n);
cublas_wrapper_->Gemm(
CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, attention_weights->key_weight.kernel, n, from_tensor, k, k_buf_, n);
cublas_wrapper_->Gemm(CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
attention_weights->value_weight.kernel,
n,
from_tensor,
k,
v_buf_,
n);
}
#ifdef SPARSITY_ENABLED
}
#endif
invokeTrtAddQkvBias(m, attention_weights);
sync_check_cuda_error();
int S = dispatcher_fp16->getSFromMaxSeqLen(request_seq_len);
FT_CHECK(dispatcher_fp16->isValid(S, false));
const int B = input_tensors->at("padding_offset").shape[0] - 1;
dispatcher_fp16->setup(S, B);
dispatcher_fp16->run(qkv_buf_, nullptr, padding_offset, attn_workspace_, qkv_buf_2_, stream_);
sync_check_cuda_error();
k = hidden_units_;
n = d_model_;
#ifdef SPARSITY_ENABLED
if (sparse_ && cublas_wrapper_->isUseSparse(1, n, m, k)) {
cublas_wrapper_->SpGemm(CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m_padded,
k,
attention_weights->attention_output_weight.sp_kernel,
qkv_buf_2_,
attention_out);
}
else {
#endif
cublas_wrapper_->Gemm(CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
attention_weights->attention_output_weight.kernel,
n,
qkv_buf_2_,
k,
attention_out,
n);
#ifdef SPARSITY_ENABLED
}
#endif
if (is_free_buffer_after_forward_ == true) {
freeBuffer();
}
}
template<typename T>
FusedAttentionLayer<T>::FusedAttentionLayer(size_t max_batch_size,
size_t max_seq_len,
size_t head_num,
size_t size_per_head,
size_t d_model,
int sm,
float q_scaling,
cudaStream_t stream,
cublasMMWrapper* cublas_wrapper,
IAllocator* allocator,
bool is_free_buffer_after_forward,
bool sparse):
BaseAttentionLayer<T>(stream, cublas_wrapper, allocator, is_free_buffer_after_forward),
head_num_(head_num),
size_per_head_(size_per_head),
d_model_(d_model),
sm_(sm),
q_scaling_(q_scaling),
sparse_(sparse)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
if (((sm_ == kSM_70 || sm_ == kSM_86 || sm_ == kSM_80 || sm_ == kSM_75 || sm_ == kSM_72) && size_per_head_ == 64)
|| ((sm_ == kSM_86 || sm_ == kSM_80 || sm_ == kSM_75) && size_per_head_ == 32)) {
dispatcher_fp16.reset(new FusedMHARunnerFP16v2(head_num_, size_per_head_, sm_, q_scaling_));
}
else {
throw std::runtime_error(std::string("[FT][ERROR] FusedAttentionLayer not support \n"));
}
hidden_units_ = head_num_ * size_per_head_;
}
template<typename T>
FusedAttentionLayer<T>::FusedAttentionLayer(FusedAttentionLayer<T> const& attention_layer):
FusedAttentionLayer(0,
0,
attention_layer.head_num_,
attention_layer.size_per_head_,
attention_layer.d_model_,
attention_layer.sm_,
attention_layer.q_scaling_,
attention_layer.stream_,
attention_layer.cublas_wrapper_,
attention_layer.allocator_,
attention_layer.is_free_buffer_after_forward_,
attention_layer.sparse_)
{
}
template<typename T>
FusedAttentionLayer<T>::~FusedAttentionLayer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
cublas_wrapper_ = nullptr;
freeBuffer();
}
template<typename T>
void FusedAttentionLayer<T>::allocateBuffer()
{
FT_CHECK(false);
}
template<typename T>
void FusedAttentionLayer<T>::allocateBuffer(size_t batch_size, size_t seq_len)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
q_buf_ = (T*)allocator_->reMalloc(q_buf_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
k_buf_ = (T*)allocator_->reMalloc(k_buf_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
v_buf_ = (T*)allocator_->reMalloc(v_buf_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
qkv_buf_ = (T*)allocator_->reMalloc(qkv_buf_, sizeof(T) * 3 * batch_size * seq_len * hidden_units_, false);
qkv_buf_2_ = (T*)allocator_->reMalloc(qkv_buf_2_, sizeof(T) * batch_size * seq_len * hidden_units_, false);
attn_workspace_ = (T*)allocator_->reMalloc(attn_workspace_, dispatcher_fp16->getWorkspaceSize(), false);
batch_qkv_kernel_ptr_ = (T**)allocator_->reMalloc(batch_qkv_kernel_ptr_, sizeof(T*) * 12, false);
batch_qkv_input_ptr_ = batch_qkv_kernel_ptr_ + 4;
batch_qkv_buf_ptr_ = batch_qkv_input_ptr_ + 4;
is_allocate_buffer_ = true;
}
template<typename T>
void FusedAttentionLayer<T>::freeBuffer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
if (is_allocate_buffer_) {
allocator_->free((void**)(&q_buf_));
allocator_->free((void**)(&k_buf_));
allocator_->free((void**)(&v_buf_));
allocator_->free((void**)(&qkv_buf_));
allocator_->free((void**)(&qkv_buf_2_));
allocator_->free((void**)(&attn_workspace_));
allocator_->free((void**)(&batch_qkv_kernel_ptr_));
sync_check_cuda_error();
is_allocate_buffer_ = false;
}
}
template<typename T>
bool FusedAttentionLayer<T>::isValidSeqLen(const size_t seq_len)
{
return true;
}
template class FusedAttentionLayer<float>;
template class FusedAttentionLayer<half>;
#ifdef ENABLE_BF16
template class FusedAttentionLayer<__nv_bfloat16>;
#endif
} // namespace fastertransformer
|
fe65b9c29ebab6546cd2e64f785c26b8729010be.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/adjustedRandIndex.cuh>
#include <metrics/contingencyMatrix.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
struct AdjustedRandIndexParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
// if this is true, then it is assumed that `sameArrays` is also true
// further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
bool testZeroArray;
};
template <typename T, typename MathT = int>
class AdjustedRandIndexTest
: public ::testing::TestWithParam<AdjustedRandIndexParam> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AdjustedRandIndexParam>::GetParam();
nElements = params.nElements;
allocate(firstClusterArray, nElements, true);
allocate(secondClusterArray, nElements, true);
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
if (!params.testZeroArray) {
SetUpDifferentArrays();
} else {
SetupZeroArray();
}
//allocating and initializing memory on the GPU
computedAdjustedRandIndex = computeAdjustedRandIndex<T, MathT>(
firstClusterArray, secondClusterArray, nElements, allocator, stream);
}
void TearDown() override {
CUDA_CHECK(hipFree(firstClusterArray));
CUDA_CHECK(hipFree(secondClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
void SetUpDifferentArrays() {
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
// calculating golden output
int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
int *hGoldenOutput = (int *)malloc(sizeOfMat);
memset(hGoldenOutput, 0, sizeOfMat);
for (int i = 0; i < nElements; i++) {
int row = arr1[i] - lowerLabelRange;
int column = arr2[i] - lowerLabelRange;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
int sumOfNijCTwo = 0;
int *a = (int *)malloc(numUniqueClasses * sizeof(int));
int *b = (int *)malloc(numUniqueClasses * sizeof(int));
memset(a, 0, numUniqueClasses * sizeof(int));
memset(b, 0, numUniqueClasses * sizeof(int));
int sumOfAiCTwo = 0;
int sumOfBiCTwo = 0;
//calculating the number of pairwise points in each contingency-matrix cell
//and also reducing the contingency matrix along rows (a) and columns (b)
for (int i = 0; i < numUniqueClasses; ++i) {
for (int j = 0; j < numUniqueClasses; ++j) {
int Nij = hGoldenOutput[i * numUniqueClasses + j];
sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
a[i] += hGoldenOutput[i * numUniqueClasses + j];
b[i] += hGoldenOutput[j * numUniqueClasses + i];
}
}
//calculating the number of pairwise points in every row sum (a)
//and in every column sum (b)
for (int i = 0; i < numUniqueClasses; ++i) {
sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
}
//calculating the ARI
double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
double expectedIndex =
(double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
double index = (double)sumOfNijCTwo;
if (maxIndex - expectedIndex)
truthAdjustedRandIndex =
(index - expectedIndex) / (maxIndex - expectedIndex);
else
truthAdjustedRandIndex = 0;
updateDevice(firstClusterArray, &arr1[0], nElements, stream);
updateDevice(secondClusterArray, &arr2[0], nElements, stream);
}
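/*
The golden value computed above follows the standard Adjusted Rand Index definition. With
n_ij the contingency-matrix entries, a_i its row sums, b_j its column sums and n the number
of samples:

    Index         = sum_ij C(n_ij, 2)                                   (sumOfNijCTwo)
    ExpectedIndex = [sum_i C(a_i, 2)] * [sum_j C(b_j, 2)] / C(n, 2)
    MaxIndex      = ( sum_i C(a_i, 2) + sum_j C(b_j, 2) ) / 2
    ARI           = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex)

with ARI taken as 0 in the degenerate case MaxIndex == ExpectedIndex, exactly as handled above.
*/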
void SetupZeroArray() {
lowerLabelRange = 0;
upperLabelRange = 0;
truthAdjustedRandIndex = 1.0;
}
AdjustedRandIndexParam params;
T lowerLabelRange, upperLabelRange;
T *firstClusterArray = nullptr;
T *secondClusterArray = nullptr;
int nElements = 0;
double truthAdjustedRandIndex = 0;
double computedAdjustedRandIndex = 0;
hipStream_t stream;
};
const std::vector<AdjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001, false}, {200, 15, 100, false, 0.000001, false},
{100, 1, 20, false, 0.000001, false}, {10, 1, 10, false, 0.000001, false},
{198, 1, 100, false, 0.000001, false}, {300, 3, 99, false, 0.000001, false},
{199, 1, 10, true, 0.000001, false}, {200, 15, 100, true, 0.000001, false},
{100, 1, 20, true, 0.000001, false}, {10, 1, 10, true, 0.000001, false},
{198, 1, 100, true, 0.000001, false}, {300, 3, 99, true, 0.000001, false},
{199, 0, 0, false, 0.000001, true}, {200, 0, 0, false, 0.000001, true},
{100, 0, 0, false, 0.000001, true}, {10, 0, 0, false, 0.000001, true},
{198, 0, 0, false, 0.000001, true}, {300, 0, 0, false, 0.000001, true},
{199, 0, 0, true, 0.000001, true}, {200, 0, 0, true, 0.000001, true},
{100, 0, 0, true, 0.000001, true}, {10, 0, 0, true, 0.000001, true},
{198, 0, 0, true, 0.000001, true}, {300, 0, 0, true, 0.000001, true},
};
const std::vector<AdjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001, false},
{2000000, 1, 1000, true, 0.000001, false},
{2000000, 0, 0, false, 0.000001, true},
{2000000, 0, 0, true, 0.000001, true},
};
typedef AdjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_ii, ::testing::ValuesIn(inputs));
typedef AdjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AdjustedRandIndexLarge, ARI_il,
::testing::ValuesIn(large_inputs));
} //end namespace Metrics
} //end namespace MLCommon
| fe65b9c29ebab6546cd2e64f785c26b8729010be.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/adjustedRandIndex.cuh>
#include <metrics/contingencyMatrix.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
struct AdjustedRandIndexParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
// if this is true, then it is assumed that `sameArrays` is also true
// further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
bool testZeroArray;
};
template <typename T, typename MathT = int>
class AdjustedRandIndexTest
: public ::testing::TestWithParam<AdjustedRandIndexParam> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AdjustedRandIndexParam>::GetParam();
nElements = params.nElements;
allocate(firstClusterArray, nElements, true);
allocate(secondClusterArray, nElements, true);
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
if (!params.testZeroArray) {
SetUpDifferentArrays();
} else {
SetupZeroArray();
}
//allocating and initializing memory to the GPU
computedAdjustedRandIndex = computeAdjustedRandIndex<T, MathT>(
firstClusterArray, secondClusterArray, nElements, allocator, stream);
}
void TearDown() override {
CUDA_CHECK(cudaFree(firstClusterArray));
CUDA_CHECK(cudaFree(secondClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void SetUpDifferentArrays() {
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
// calculating golden output
int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
int *hGoldenOutput = (int *)malloc(sizeOfMat);
memset(hGoldenOutput, 0, sizeOfMat);
for (int i = 0; i < nElements; i++) {
int row = arr1[i] - lowerLabelRange;
int column = arr2[i] - lowerLabelRange;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
int sumOfNijCTwo = 0;
int *a = (int *)malloc(numUniqueClasses * sizeof(int));
int *b = (int *)malloc(numUniqueClasses * sizeof(int));
memset(a, 0, numUniqueClasses * sizeof(int));
memset(b, 0, numUniqueClasses * sizeof(int));
int sumOfAiCTwo = 0;
int sumOfBiCTwo = 0;
    //calculate the number of co-clustered point pairs in each contingency-matrix cell
    //and also reduce the contingency matrix along its rows (a) and columns (b)
for (int i = 0; i < numUniqueClasses; ++i) {
for (int j = 0; j < numUniqueClasses; ++j) {
int Nij = hGoldenOutput[i * numUniqueClasses + j];
sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
a[i] += hGoldenOutput[i * numUniqueClasses + j];
b[i] += hGoldenOutput[j * numUniqueClasses + i];
}
}
    //calculate the number of pairwise points contributed by every row sum (a)
    //and by every column sum (b)
for (int i = 0; i < numUniqueClasses; ++i) {
sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
}
//calculating the ARI
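    // ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex), where
    //   Index         = sum_ij C(Nij,2)  (pairs grouped together in both labelings)
    //   ExpectedIndex = sum_i C(a_i,2) * sum_j C(b_j,2) / C(n,2)
    //   MaxIndex      = (sum_i C(a_i,2) + sum_j C(b_j,2)) / 2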
double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
double expectedIndex =
(double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
double index = (double)sumOfNijCTwo;
if (maxIndex - expectedIndex)
truthAdjustedRandIndex =
(index - expectedIndex) / (maxIndex - expectedIndex);
else
truthAdjustedRandIndex = 0;
updateDevice(firstClusterArray, &arr1[0], nElements, stream);
updateDevice(secondClusterArray, &arr2[0], nElements, stream);
}
void SetupZeroArray() {
lowerLabelRange = 0;
upperLabelRange = 0;
truthAdjustedRandIndex = 1.0;
}
AdjustedRandIndexParam params;
T lowerLabelRange, upperLabelRange;
T *firstClusterArray = nullptr;
T *secondClusterArray = nullptr;
int nElements = 0;
double truthAdjustedRandIndex = 0;
double computedAdjustedRandIndex = 0;
cudaStream_t stream;
};
const std::vector<AdjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001, false}, {200, 15, 100, false, 0.000001, false},
{100, 1, 20, false, 0.000001, false}, {10, 1, 10, false, 0.000001, false},
{198, 1, 100, false, 0.000001, false}, {300, 3, 99, false, 0.000001, false},
{199, 1, 10, true, 0.000001, false}, {200, 15, 100, true, 0.000001, false},
{100, 1, 20, true, 0.000001, false}, {10, 1, 10, true, 0.000001, false},
{198, 1, 100, true, 0.000001, false}, {300, 3, 99, true, 0.000001, false},
{199, 0, 0, false, 0.000001, true}, {200, 0, 0, false, 0.000001, true},
{100, 0, 0, false, 0.000001, true}, {10, 0, 0, false, 0.000001, true},
{198, 0, 0, false, 0.000001, true}, {300, 0, 0, false, 0.000001, true},
{199, 0, 0, true, 0.000001, true}, {200, 0, 0, true, 0.000001, true},
{100, 0, 0, true, 0.000001, true}, {10, 0, 0, true, 0.000001, true},
{198, 0, 0, true, 0.000001, true}, {300, 0, 0, true, 0.000001, true},
};
const std::vector<AdjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001, false},
{2000000, 1, 1000, true, 0.000001, false},
{2000000, 0, 0, false, 0.000001, true},
{2000000, 0, 0, true, 0.000001, true},
};
typedef AdjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_ii, ::testing::ValuesIn(inputs));
typedef AdjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AdjustedRandIndexLarge, ARI_il,
::testing::ValuesIn(large_inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
a2168e25e7a417010cf718240bb7950a03347948.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void kernel( void ) { }
int main()
{
hipLaunchKernelGGL(kernel, dim3(1), dim3(1), 0, 0);
printf("Hello World!\n");
return EXIT_SUCCESS;
}
| a2168e25e7a417010cf718240bb7950a03347948.cu | #include <stdlib.h>
#include <stdio.h>
__global__ void kernel( void ) { }
int main()
{
kernel<<< 1, 1 >>>();
printf("Hello World!\n");
return EXIT_SUCCESS;
}
|
2c47dd770d0f8a3de8e5b90028e6fa4e05e3c1d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
void cuda_points_locate()
{
// parallelize over CPU threads
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
int threads = MAX_THREADS_1D;
int blocks = (int)ceil((real) npoints / (real) threads);
dim3 dimBlocks(threads);
dim3 numBlocks(blocks);
    hipLaunchKernelGGL(lpt_localize, numBlocks, dimBlocks, 0, 0, npoints, _points[dev], _dom[dev], bc);
}
}
__global__ void lpt_localize(int npoints, point_struct *points, dom_struct *dom, BC bc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
if(pp<npoints)
{
// Cartesian location of node
real xp = points[pp].x;
real yp = points[pp].y;
real zp = points[pp].z;
//TODO whether periodic BC for point particle need to be determined in future
if(xp < dom->xs && bc.uW == PERIODIC) xp = xp + dom->xl;
else if(xp > dom->xe && bc.uE == PERIODIC) xp = xp - dom->xl;
if(yp < dom->ys && bc.vS == PERIODIC) yp = yp + dom->yl;
else if(yp > dom->ye && bc.vN == PERIODIC) yp = yp - dom->yl;
if(zp < dom->zs && bc.wB == PERIODIC) zp = zp + dom->zl;
else if(zp > dom->ze && bc.wT == PERIODIC) zp = zp - dom->zl;
int i = points[pp].i;
int j = points[pp].j;
int k = points[pp].k;
//TODO
if(i < dom->Gcc.is) i = dom->Gcc.ie-1;
if(j < dom->Gcc.js) j = dom->Gcc.je-1;
if(k < dom->Gcc.ks) k = dom->Gcc.ke-1;
if(i > dom->Gcc.ie-1) i = dom->Gcc.is;
if(j > dom->Gcc.je-1) j = dom->Gcc.js;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ks;
int ip=i;
int jp=j;
int kp=k;
real x = (ip-DOM_BUF) * dom->dx + dom->xs;
real y = (jp-DOM_BUF) * dom->dy + dom->ys;
real z = (kp-DOM_BUF) * dom->dz + dom->zs;
while(xp <x)
{
ip=ip-1;
x = (ip-DOM_BUF) * dom->dx + dom->xs;
}
while(xp >=x)
{
ip=ip+1;
x = (ip-DOM_BUF) * dom->dx + dom->xs;
}
while(yp <y)
{
jp=jp-1;
y = (jp-DOM_BUF) * dom->dy + dom->ys;
}
while(yp >=y)
{
jp=jp+1;
y = (jp-DOM_BUF) * dom->dy + dom->ys;
}
while(zp <z)
{
kp=kp-1;
z = (kp-DOM_BUF) * dom->dz + dom->zs;
}
while(zp >=z)
{
kp=kp+1;
z = (kp-DOM_BUF) * dom->dz + dom->zs;
}
points[pp].i= ip;
points[pp].j= jp;
points[pp].k= kp;
}
}
__global__ void lpt_smooth(int npoints, point_struct *points, dom_struct *dom, real *fpx,real *fpy,real *fpz,real *f_x,real *f_y,real *f_z, BC bc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
if(pp<npoints)
{
// Cartesian location of node
real xp = points[pp].x;
real yp = points[pp].y;
real zp = points[pp].z;
//TODO whether periodic BC for point particle need to be determined in future
if(xp < dom->xs && bc.uW == PERIODIC) xp = xp + dom->xl;
else if(xp > dom->xe && bc.uE == PERIODIC) xp = xp - dom->xl;
if(yp < dom->ys && bc.vS == PERIODIC) yp = yp + dom->yl;
else if(yp > dom->ye && bc.vN == PERIODIC) yp = yp - dom->yl;
if(zp < dom->zs && bc.wB == PERIODIC) zp = zp + dom->zl;
else if(zp > dom->ze && bc.wT == PERIODIC) zp = zp - dom->zl;
int i = points[pp].i;
int j = points[pp].j;
int k = points[pp].k;
if(i < dom->Gcc.is) i = dom->Gcc.ie-1;
if(j < dom->Gcc.js) j = dom->Gcc.je-1;
if(k < dom->Gcc.ks) k = dom->Gcc.ke-1;
if(i > dom->Gcc.ie-1) i = dom->Gcc.is;
if(j > dom->Gcc.je-1) j = dom->Gcc.js;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ks;
    //TODO how to smooth the func???? The smoothing kernel itself is not implemented yet;
    // as a placeholder (assuming fpx/fpy/fpz are the per-particle force outputs) write zeros
    // so the assignments below are at least well-formed.
    fpx[pp] = 0.f;
    fpy[pp] = 0.f;
    fpz[pp] = 0.f;
}
}
__device__ void lpt_source_scalar(real xd,real yd,real zd,int id,int jd,int kd,real ud,real dud,real vd,real dvd,real wd,real dwd,real md,real dmd,int nparceld)
{
// Create the source term
//TODO need to change scalar source!!!
  real scalar_src = 1.f;
//mollify each source term
lpt_mollify_sc(tmp1,xd,yd,zd,id,jd,kd,scalar_src);
}
__global__ void lpt_source_scalar(real *u, real *v, real *w, int npoints, real rho_f, real nu, real *ug, real *vg, real *wg, point_struct *points, dom_struct *dom, real dt0, real dt, BC bc, real *scSrc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
if(pp<npoints)
{
// Cartesian location of node
real xd = points[pp].x;
real yd = points[pp].y;
real zd = points[pp].z;
int id = points[pp].i;
int jd = points[pp].j;
int kd = points[pp].k;
// Create the source term
//TODO need to change scalar source!!!
    real scalar_src_points = 1.f;
    //mollify each source term
    lpt_mollify_sc(scSrc,xd,yd,zd,id,jd,kd,scalar_src_points);
    //TODO the averaging step below was only a pasted prototype, not a valid call:
    //entrySearch_avg_entries_kernel(real *iarr, real *maxarr, int size);
}
}
/*==================================== !
! Spray -> gas phase momentum exchange !
! Careful, still need to divide by vol !
! ==================================== !*/
__device__ void lpt_source_momentum(real xd,real yd,real zd,int id,int jd,int kd,real ud,real dud,real vd,real dvd,real wd,real dwd,real md,real dmd,int nparceld)
{
// Create the source term
  real mom_src_x = -(md*dud+dmd*ud)*nparceld;
  real mom_src_y = -(md*dvd+dmd*vd)*nparceld;
  real mom_src_z = -(md*dwd+dmd*wd)*nparceld;
lpt_mollify_sc(tmp1,xd,yd,zd,id,jd,kd,mom_src_x);
lpt_mollify_sc(tmp2,xd,yd,zd,id,jd,kd,mom_src_y);
lpt_mollify_sc(tmp3,xd,yd,zd,id,jd,kd,mom_src_z);
}
//Case periodic in y and z directions
// xp~zp and ip~kp are the particle position and the particle's cell indices, respectively.
//A is the grid-cell-centered field that receives the spread (mollified) contribution, and Ap is the particle's contribution strength
__device__ void lpt_mollify_sc(real *A,real xp,real yp,real zp,int ip,int jp,int kp,real Ap)
{
if(ip < dom->Gcc.is||jp < dom->Gcc.js||kp < dom->Gcc.ks||ip > dom->Gcc.ie-1||jp > dom->Gcc.je-1||kp > dom->Gcc.ke-1)
{
printf("\nip,jp,kp,xp,yp,zp %d %d %d %d %d %d\n",ip,jp,kp,xp,yp,zp);
fprintf(stderr,"\nParticle has left the domain\n");
exit(EXIT_FAILURE);
}
real ksi[3][3][3];
real buf=0;
for(int dk=-1;dk<2;dk++)
for(int dj=-1;dj<2;dj++)
for(int di=-1;di<2;di++)
{{{
//TODO what's ksi????
ksi[di+1][dj+1][dk+1]=lpt_integrate_mol(ip+di,jp+dj,kp+dk,xp,yp,zp);
buf+=ksi[di+1][dj+1][dk+1];
}}}
  //TODO add mask information as in lpt_mollify_sc in lpt_interpolator.f90
// Normalize ksi = ksi/buf
if (buf>0.f){
for(int dk=-1;dk<2;dk++)
for(int dj=-1;dj<2;dj++)
for(int di=-1;di<2;di++)
{{{ksi[di+1][dj+1][dk+1]=ksi[di+1][dj+1][dk+1]/buf;}}}
}
// Perform the actual extrapolation on A
for(int dk=-1;dk<2;dk++)
for(int dj=-1;dj<2;dj++)
for(int di=-1;di<2;di++)
{{{
A[di+ip][dj+jp][dk+kp]+=ksi[di+1][dj+1][dk+1]*Ap;
}}}
}
__device__ real lpt_integrate_mol(int ic,int jc,int kc,real xp,real yp,real zp, real dx,real dy,real dz,real xs,real ys,real zs)
{
/*
real xm = (ic-DOM_BUF+0.5) * dom->dx + dom->xs;
real ym = (jc-DOM_BUF+0.5) * dom->dy + dom->ys;
real zm = (kc-DOM_BUF+0.5) * dom->dz + dom->zs;
real r = sqrt((xp-xm)*(xp-xm)+(yp-ym)*(yp-ym)+(zp-zm)*(zp-zm));
//TODO make this as defined value avaible from host and device!!!
real min_meshsize=min(min(dom->dx,dom->dy),dom->dz);
real cellVol=dom->dx *dom->dy *dom->dz;
*/
real xm = (ic-DOM_BUF+0.5) * dx + xs;
real ym = (jc-DOM_BUF+0.5) * dy + ys;
real zm = (kc-DOM_BUF+0.5) * dz + zs;
real r = sqrt((xp-xm)*(xp-xm)+(yp-ym)*(yp-ym)+(zp-zm)*(zp-zm));
  //TODO make this a defined value available from both host and device!!!
real min_meshsize=min(min(dx,dy),dz);
real cellVol=dx *dy *dz;
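  // Gaussian mollifier: sig is chosen so the kernel's full width at half maximum equals the
  // smallest mesh spacing (FWHM = 2*sqrt(2*ln2)*sig); the weight below is scaled by the cell volume.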
real sig= min_meshsize/(2.0f*sqrt(2.0f*log(2.0f)));
real val = exp(-r*r/(2.0f*sig*sig));
real fs= cellVol*val;
return fs;
}
__global__ void forcing_add_sc_const(real val, real *sc, dom_struct *dom)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x;
int tk = blockIdx.y * blockDim.y + threadIdx.y;
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
if(tj < dom->Gcc._jnb && tk < dom->Gcc._knb) {
sc[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] += val;
}
}
}
__global__ void forcing_add_sc_field(real scale, real *val, real *sc,
dom_struct *dom)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x;
int tk = blockIdx.y * blockDim.y + threadIdx.y;
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
if(tj < dom->Gcc._jnb && tk < dom->Gcc._knb) {
sc[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b]
+= scale * val[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b];
}
}
}
| 2c47dd770d0f8a3de8e5b90028e6fa4e05e3c1d8.cu | extern "C"
void cuda_points_locate()
{
// parallelize over CPU threads
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
int threads = MAX_THREADS_1D;
int blocks = (int)ceil((real) npoints / (real) threads);
dim3 dimBlocks(threads);
dim3 numBlocks(blocks);
lpt_localize<<<numBlocks, dimBlocks>>>(npoints,_points[dev],_dom[dev],bc);
}
}
__global__ void lpt_localize(int npoints, point_struct *points, dom_struct *dom, BC bc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
if(pp<npoints)
{
// Cartesian location of node
real xp = points[pp].x;
real yp = points[pp].y;
real zp = points[pp].z;
//TODO whether periodic BC for point particle need to be determined in future
if(xp < dom->xs && bc.uW == PERIODIC) xp = xp + dom->xl;
else if(xp > dom->xe && bc.uE == PERIODIC) xp = xp - dom->xl;
if(yp < dom->ys && bc.vS == PERIODIC) yp = yp + dom->yl;
else if(yp > dom->ye && bc.vN == PERIODIC) yp = yp - dom->yl;
if(zp < dom->zs && bc.wB == PERIODIC) zp = zp + dom->zl;
else if(zp > dom->ze && bc.wT == PERIODIC) zp = zp - dom->zl;
int i = points[pp].i;
int j = points[pp].j;
int k = points[pp].k;
//TODO
if(i < dom->Gcc.is) i = dom->Gcc.ie-1;
if(j < dom->Gcc.js) j = dom->Gcc.je-1;
if(k < dom->Gcc.ks) k = dom->Gcc.ke-1;
if(i > dom->Gcc.ie-1) i = dom->Gcc.is;
if(j > dom->Gcc.je-1) j = dom->Gcc.js;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ks;
int ip=i;
int jp=j;
int kp=k;
real x = (ip-DOM_BUF) * dom->dx + dom->xs;
real y = (jp-DOM_BUF) * dom->dy + dom->ys;
real z = (kp-DOM_BUF) * dom->dz + dom->zs;
while(xp <x)
{
ip=ip-1;
x = (ip-DOM_BUF) * dom->dx + dom->xs;
}
while(xp >=x)
{
ip=ip+1;
x = (ip-DOM_BUF) * dom->dx + dom->xs;
}
while(yp <y)
{
jp=jp-1;
y = (jp-DOM_BUF) * dom->dy + dom->ys;
}
while(yp >=y)
{
jp=jp+1;
y = (jp-DOM_BUF) * dom->dy + dom->ys;
}
while(zp <z)
{
kp=kp-1;
z = (kp-DOM_BUF) * dom->dz + dom->zs;
}
while(zp >=z)
{
kp=kp+1;
z = (kp-DOM_BUF) * dom->dz + dom->zs;
}
points[pp].i= ip;
points[pp].j= jp;
points[pp].k= kp;
}
}
__global__ void lpt_smooth(int npoints, point_struct *points, dom_struct *dom, real *fpx,real *fpy,real *fpz,real *f_x,real *f_y,real *f_z, BC bc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
if(pp<npoints)
{
// Cartesian location of node
real xp = points[pp].x;
real yp = points[pp].y;
real zp = points[pp].z;
//TODO whether periodic BC for point particle need to be determined in future
if(xp < dom->xs && bc.uW == PERIODIC) xp = xp + dom->xl;
else if(xp > dom->xe && bc.uE == PERIODIC) xp = xp - dom->xl;
if(yp < dom->ys && bc.vS == PERIODIC) yp = yp + dom->yl;
else if(yp > dom->ye && bc.vN == PERIODIC) yp = yp - dom->yl;
if(zp < dom->zs && bc.wB == PERIODIC) zp = zp + dom->zl;
else if(zp > dom->ze && bc.wT == PERIODIC) zp = zp - dom->zl;
int i = points[pp].i;
int j = points[pp].j;
int k = points[pp].k;
if(i < dom->Gcc.is) i = dom->Gcc.ie-1;
if(j < dom->Gcc.js) j = dom->Gcc.je-1;
if(k < dom->Gcc.ks) k = dom->Gcc.ke-1;
if(i > dom->Gcc.ie-1) i = dom->Gcc.is;
if(j > dom->Gcc.je-1) j = dom->Gcc.js;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ks;
    //TODO how to smooth the func???? The smoothing kernel itself is not implemented yet;
    // as a placeholder (assuming fpx/fpy/fpz are the per-particle force outputs) write zeros
    // so the assignments below are at least well-formed.
    fpx[pp] = 0.f;
    fpy[pp] = 0.f;
    fpz[pp] = 0.f;
}
}
__device__ void lpt_source_scalar(real xd,real yd,real zd,int id,int jd,int kd,real ud,real dud,real vd,real dvd,real wd,real dwd,real md,real dmd,int nparceld)
{
// Create the source term
//TODO need to change scalar source!!!
  real scalar_src = 1.f;
//mollify each source term
lpt_mollify_sc(tmp1,xd,yd,zd,id,jd,kd,scalar_src);
}
__global__ void lpt_source_scalar(real *u, real *v, real *w, int npoints, real rho_f, real nu, real *ug, real *vg, real *wg, point_struct *points, dom_struct *dom, real dt0, real dt, BC bc, real *scSrc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
if(pp<npoints)
{
// Cartesian location of node
real xd = points[pp].x;
real yd = points[pp].y;
real zd = points[pp].z;
int id = points[pp].i;
int jd = points[pp].j;
int kd = points[pp].k;
// Create the source term
//TODO need to change scalar source!!!
    real scalar_src_points = 1.f;
    //mollify each source term
    lpt_mollify_sc(scSrc,xd,yd,zd,id,jd,kd,scalar_src_points);
    //TODO the averaging step below was only a pasted prototype, not a valid call:
    //entrySearch_avg_entries_kernel(real *iarr, real *maxarr, int size);
}
}
/*==================================== !
! Spray -> gas phase momentum exchange !
! Careful, still need to divide by vol !
! ==================================== !*/
__device__ void lpt_source_momentum(real xd,real yd,real zd,int id,int jd,int kd,real ud,real dud,real vd,real dvd,real wd,real dwd,real md,real dmd,int nparceld)
{
// Create the source term
  real mom_src_x = -(md*dud+dmd*ud)*nparceld;
  real mom_src_y = -(md*dvd+dmd*vd)*nparceld;
  real mom_src_z = -(md*dwd+dmd*wd)*nparceld;
lpt_mollify_sc(tmp1,xd,yd,zd,id,jd,kd,mom_src_x);
lpt_mollify_sc(tmp2,xd,yd,zd,id,jd,kd,mom_src_y);
lpt_mollify_sc(tmp3,xd,yd,zd,id,jd,kd,mom_src_z);
}
//Case periodic in y and z directions
// xp~zp and ip~kp are the particle position and the particle's cell indices, respectively.
//A is the grid-cell-centered field that receives the spread (mollified) contribution, and Ap is the particle's contribution strength
__device__ void lpt_mollify_sc(real *A,real xp,real yp,real zp,int ip,int jp,int kp,real Ap)
{
if(ip < dom->Gcc.is||jp < dom->Gcc.js||kp < dom->Gcc.ks||ip > dom->Gcc.ie-1||jp > dom->Gcc.je-1||kp > dom->Gcc.ke-1)
{
printf("\nip,jp,kp,xp,yp,zp %d %d %d %d %d %d\n",ip,jp,kp,xp,yp,zp);
fprintf(stderr,"\nParticle has left the domain\n");
exit(EXIT_FAILURE);
}
real ksi[3][3][3];
real buf=0;
for(int dk=-1;dk<2;dk++)
for(int dj=-1;dj<2;dj++)
for(int di=-1;di<2;di++)
{{{
//TODO what's ksi????
ksi[di+1][dj+1][dk+1]=lpt_integrate_mol(ip+di,jp+dj,kp+dk,xp,yp,zp);
buf+=ksi[di+1][dj+1][dk+1];
}}}
  //TODO add mask information as in lpt_mollify_sc in lpt_interpolator.f90
// Normalize ksi = ksi/buf
if (buf>0.f){
for(int dk=-1;dk<2;dk++)
for(int dj=-1;dj<2;dj++)
for(int di=-1;di<2;di++)
{{{ksi[di+1][dj+1][dk+1]=ksi[di+1][dj+1][dk+1]/buf;}}}
}
// Perform the actual extrapolation on A
for(int dk=-1;dk<2;dk++)
for(int dj=-1;dj<2;dj++)
for(int di=-1;di<2;di++)
{{{
A[di+ip][dj+jp][dk+kp]+=ksi[di+1][dj+1][dk+1]*Ap;
}}}
}
__device__ real lpt_integrate_mol(int ic,int jc,int kc,real xp,real yp,real zp, real dx,real dy,real dz,real xs,real ys,real zs)
{
/*
real xm = (ic-DOM_BUF+0.5) * dom->dx + dom->xs;
real ym = (jc-DOM_BUF+0.5) * dom->dy + dom->ys;
real zm = (kc-DOM_BUF+0.5) * dom->dz + dom->zs;
real r = sqrt((xp-xm)*(xp-xm)+(yp-ym)*(yp-ym)+(zp-zm)*(zp-zm));
//TODO make this as defined value avaible from host and device!!!
real min_meshsize=min(min(dom->dx,dom->dy),dom->dz);
real cellVol=dom->dx *dom->dy *dom->dz;
*/
real xm = (ic-DOM_BUF+0.5) * dx + xs;
real ym = (jc-DOM_BUF+0.5) * dy + ys;
real zm = (kc-DOM_BUF+0.5) * dz + zs;
real r = sqrt((xp-xm)*(xp-xm)+(yp-ym)*(yp-ym)+(zp-zm)*(zp-zm));
  //TODO make this a defined value available from both host and device!!!
real min_meshsize=min(min(dx,dy),dz);
real cellVol=dx *dy *dz;
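  // Gaussian mollifier: sig is chosen so the kernel's full width at half maximum equals the
  // smallest mesh spacing (FWHM = 2*sqrt(2*ln2)*sig); the weight below is scaled by the cell volume.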
real sig= min_meshsize/(2.0f*sqrt(2.0f*log(2.0f)));
real val = exp(-r*r/(2.0f*sig*sig));
real fs= cellVol*val;
return fs;
}
__global__ void forcing_add_sc_const(real val, real *sc, dom_struct *dom)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x;
int tk = blockIdx.y * blockDim.y + threadIdx.y;
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
if(tj < dom->Gcc._jnb && tk < dom->Gcc._knb) {
sc[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] += val;
}
}
}
__global__ void forcing_add_sc_field(real scale, real *val, real *sc,
dom_struct *dom)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x;
int tk = blockIdx.y * blockDim.y + threadIdx.y;
for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) {
if(tj < dom->Gcc._jnb && tk < dom->Gcc._knb) {
sc[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b]
+= scale * val[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b];
}
}
}
|
2bd7dca91cbbf7770e2788b292b76f97dff43e63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../common.h"
__inline__
__device__ // afterward: (a <= b)
static void cmpswap(int &a, int &b)
{
if (a > b) {
int tmp = a;
a = b;
b = tmp;
}
}
texture<int> tex;
__global__
static void median2(int nx, int ny, int *d_dst, const int *d_src, size_t offset)
{
// TODO get image window (global memory or textures)
// TODO (advanced) shared memory
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
int v[9] = {0,0,0, 0,0,0, 0,0,0};
int index = 0;
for (int yy = max(0,y-1); yy <= min(ny-1,y+1); ++yy) {
for (int xx = max(0,x-1); xx <= min(nx-1,x+1); ++xx)
v[index++] = tex1Dfetch(tex, yy*nx + xx + offset);
}
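    // out-of-range neighbors keep their initial value 0, so border pixels are
    // effectively zero-padded before the median is taken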
// TODO sort and get the median value (bubble sort)
// TODO (advanced) 3x3 exchange sort: http://graphics.cs.williams.edu/papers/MedianShaderX6/median.pix
for (int i = 0; i < 9; ++i) {
for (int j = i + 1; j < 9; j++)
cmpswap(v[i], v[j]);
}
// TODO store the value in the output image
d_dst[y*nx + x] = v[4];
}
int main()
{
int nx = 1024, ny = 1024; // TODO larger sizes
int *h_src = (int *)malloc(nx*ny*sizeof(*h_src));
for (int x = 0; x < nx; ++x) {
for (int y = 0; y < ny; ++y)
h_src[y*nx + x] = 10 * float(rand())/RAND_MAX; // [0-10]
}
// print upper-left corner to verify
printf("source:\n");
for (int y = 0; y < min(8,ny); ++y) {
for (int x = 0; x < min(13,nx); ++x)
printf("%5d ", h_src[y*nx + x]);
putchar('\n');
}
// allocate and populate input (all ones)
int *d_src, *d_dst;
size_t bytes = nx*ny*sizeof(*d_src);
CUDA(hipMalloc(&d_src, bytes));
CUDA(hipMalloc(&d_dst, bytes));
CUDA(hipMemcpy(d_src, h_src, bytes, hipMemcpyHostToDevice));
CUDA(hipMemset(d_dst, 0, bytes)); // zero-out output
// create events
hipEvent_t start, stop;
CUDA(hipEventCreate(&start));
CUDA(hipEventCreate(&stop));
size_t offset;
CUDA(hipBindTexture(&offset, tex, d_src, bytes));
// filter
dim3 thr(8,8);
dim3 blk(divup(nx, thr.x), divup(ny, thr.y));
CUDA(hipEventRecord(start, 0));
hipLaunchKernelGGL(( median2), dim3(blk),dim3(thr), 0, 0, nx, ny, d_dst, d_src, offset);
CUDA(hipGetLastError());
CUDA(hipEventRecord(stop, 0));
// time kernel
CUDA(hipEventSynchronize(stop));
float time_ms = 0;
CUDA(hipEventElapsedTime(&time_ms, start, stop));
// print upper-left corner to verify
bytes = nx*8*sizeof(*d_dst); // max needed since only care about corner
int *h_dst = (int *)malloc(bytes);
CUDA(hipMemcpy(h_dst, d_dst, bytes, hipMemcpyDeviceToHost));
printf("destination:\n");
for (int y = 0; y < min(8,ny); ++y) {
for (int x = 0; x < min(13,nx); ++x)
printf("%5d ", h_dst[y*nx + x]);
putchar('\n');
}
bytes = (9+1)*nx*ny*sizeof(*d_src); // 9 read, 1 write
printf("bandwidth: %f GB/s\n", bytes/double(1<<30) / (time_ms/1e3));
return 0;
}
| 2bd7dca91cbbf7770e2788b292b76f97dff43e63.cu | #include <stdio.h>
#include "../common.h"
__inline__
__device__ // afterward: (a <= b)
static void cmpswap(int &a, int &b)
{
if (a > b) {
int tmp = a;
a = b;
b = tmp;
}
}
texture<int> tex;
__global__
static void median2(int nx, int ny, int *d_dst, const int *d_src, size_t offset)
{
// TODO get image window (global memory or textures)
// TODO (advanced) shared memory
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
int v[9] = {0,0,0, 0,0,0, 0,0,0};
int index = 0;
for (int yy = max(0,y-1); yy <= min(ny-1,y+1); ++yy) {
for (int xx = max(0,x-1); xx <= min(nx-1,x+1); ++xx)
v[index++] = tex1Dfetch(tex, yy*nx + xx + offset);
}
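    // out-of-range neighbors keep their initial value 0, so border pixels are
    // effectively zero-padded before the median is taken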
// TODO sort and get the median value (bubble sort)
// TODO (advanced) 3x3 exchange sort: http://graphics.cs.williams.edu/papers/MedianShaderX6/median.pix
for (int i = 0; i < 9; ++i) {
for (int j = i + 1; j < 9; j++)
cmpswap(v[i], v[j]);
}
// TODO store the value in the output image
d_dst[y*nx + x] = v[4];
}
int main()
{
int nx = 1024, ny = 1024; // TODO larger sizes
int *h_src = (int *)malloc(nx*ny*sizeof(*h_src));
for (int x = 0; x < nx; ++x) {
for (int y = 0; y < ny; ++y)
h_src[y*nx + x] = 10 * float(rand())/RAND_MAX; // [0-10]
}
// print upper-left corner to verify
printf("source:\n");
for (int y = 0; y < min(8,ny); ++y) {
for (int x = 0; x < min(13,nx); ++x)
printf("%5d ", h_src[y*nx + x]);
putchar('\n');
}
// allocate and populate input (all ones)
int *d_src, *d_dst;
size_t bytes = nx*ny*sizeof(*d_src);
CUDA(cudaMalloc(&d_src, bytes));
CUDA(cudaMalloc(&d_dst, bytes));
CUDA(cudaMemcpy(d_src, h_src, bytes, cudaMemcpyHostToDevice));
CUDA(cudaMemset(d_dst, 0, bytes)); // zero-out output
// create events
cudaEvent_t start, stop;
CUDA(cudaEventCreate(&start));
CUDA(cudaEventCreate(&stop));
size_t offset;
CUDA(cudaBindTexture(&offset, tex, d_src, bytes));
// filter
dim3 thr(8,8);
dim3 blk(divup(nx, thr.x), divup(ny, thr.y));
CUDA(cudaEventRecord(start, 0));
median2<<<blk,thr>>>(nx, ny, d_dst, d_src, offset);
CUDA(cudaGetLastError());
CUDA(cudaEventRecord(stop, 0));
// time kernel
CUDA(cudaEventSynchronize(stop));
float time_ms = 0;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
// print upper-left corner to verify
bytes = nx*8*sizeof(*d_dst); // max needed since only care about corner
int *h_dst = (int *)malloc(bytes);
CUDA(cudaMemcpy(h_dst, d_dst, bytes, cudaMemcpyDeviceToHost));
printf("destination:\n");
for (int y = 0; y < min(8,ny); ++y) {
for (int x = 0; x < min(13,nx); ++x)
printf("%5d ", h_dst[y*nx + x]);
putchar('\n');
}
bytes = (9+1)*nx*ny*sizeof(*d_src); // 9 read, 1 write
printf("bandwidth: %f GB/s\n", bytes/double(1<<30) / (time_ms/1e3));
return 0;
}
|
38dcd8451c4fe1a1d63f6b3c9a5528513eecc440.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "circuit.h"
#include <unistd.h>
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
using namespace std;
inline void cudaCheckError(int line, hipError_t ce)
{
if (ce != hipSuccess){
printf("Error: line %d %s\n", line, hipGetErrorString(ce));
exit(1);
}
}
/**
* @ Kernel Function
*/
// calculate currents gpu
__global__ void calculate_current_gpu(int num_wires,
PRECISION * wire_currents, PRECISION * wire_voltages,
int * in_ptr, int * out_ptr,
PRECISION * wire_inductance, PRECISION * wire_resistance, PRECISION * wire_capacitance,
PRECISION * node_voltage, int * wire_attr,
PRECISION * shr_voltage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
PRECISION temp_v[WIRE_SEGMENTS+1];
PRECISION temp_i[WIRE_SEGMENTS];
PRECISION old_i[WIRE_SEGMENTS];
PRECISION old_v[WIRE_SEGMENTS-1];
for (int it=idx; it<num_wires; it+=gridsize) {
PRECISION dt = DELTAT;
PRECISION recip_dt = 1.0f / dt;
int steps = STEPS;
int currents_offset = it * WIRE_SEGMENTS;
int voltages_offset = it * (WIRE_SEGMENTS-1);
// calc temporary variables
for (int j = 0; j < WIRE_SEGMENTS; j++) {
temp_i[j] = wire_currents[currents_offset+j];
old_i[j] = temp_i[j];
}
for (int j = 0; j < (WIRE_SEGMENTS-1); j++) {
temp_v[j+1] = wire_voltages[voltages_offset+j];
old_v[j] = temp_v[j+1];
}
// calc outer voltages to the node voltages
temp_v[0] = node_voltage[in_ptr[it]];
// Note: out-ptr need communication when parallel
if (wire_attr[it] == 0)
temp_v[WIRE_SEGMENTS] = node_voltage[out_ptr[it]];
else
temp_v[WIRE_SEGMENTS] = shr_voltage[it];
// Solve the RLC model iteratively
PRECISION inductance = wire_inductance[it];
PRECISION recip_resistance = 1.0f / (wire_resistance[it]);
PRECISION recip_capacitance = 1.0f / (wire_capacitance[it]);
for (int j = 0; j < steps; j++) {
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
for (int k = 0; k < WIRE_SEGMENTS; k++) {
temp_i[k] = ((temp_v[k+1] - temp_v[k]) - (inductance * (temp_i[k] - old_i[k]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
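        // charge conservation at each internal node: C*dV/dt = I_in - I_out,
        // i.e. V_k = V_k_old + dt*(I_k - I_{k+1})/C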
for (int k = 0; k < (WIRE_SEGMENTS-1); k++) {
temp_v[k+1] = old_v[k] + dt * (temp_i[k] - temp_i[k+1]) * recip_capacitance;
}
}
// Write out the results
for (int j = 0; j < WIRE_SEGMENTS; j++)
wire_currents[currents_offset+j] = temp_i[j];
for (int j = 0; j < (WIRE_SEGMENTS-1); j++)
wire_voltages[voltages_offset+j] = temp_v[j+1];
}// for: wires
}// if
__syncthreads();
}// calc_end
// distributed charge gpu
__global__ void distributed_charge_gpu(int num_wires,
PRECISION * wire_currents,
int * in_ptr, int * out_ptr,
PRECISION * node_charge, int * wire_attr,
PRECISION * shr_charge) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
for (int it = idx; it < num_wires; it+=gridsize) {
int currents_offset = it * WIRE_SEGMENTS;
// calc temporary variables
PRECISION dt = DELTAT;
PRECISION in_current = -dt * (wire_currents[currents_offset]);
PRECISION out_current = -dt * (wire_currents[currents_offset+WIRE_SEGMENTS-1]);
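      // the end-segment currents deposit charge -I*dt on the wire's terminal nodes;
      // atomicAdd is needed because several wires can share the same node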
//node_charge[in_ptr[it]] += in_current;
atomicAdd(&node_charge[in_ptr[it]], in_current);
//node_charge[out_ptr[it]] += out_current;
if (wire_attr[it] == 0)
atomicAdd(&node_charge[out_ptr[it]], out_current);
else
atomicAdd(&shr_charge[it], out_current);
}//for: iterate wires_per_pc
}// if
__syncthreads();
}// dc end
// update voltage gpu
__global__ void update_voltage_gpu( int num_nodes,
PRECISION * node_voltage, PRECISION * node_charge,
PRECISION * node_capacitance, PRECISION * node_leakage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_nodes) {
for (int it = idx; it < num_nodes; it+=gridsize) {
PRECISION voltage = node_voltage[it];
PRECISION charge = node_charge[it];
PRECISION capacitance = node_capacitance[it];
PRECISION leakage = node_leakage[it];
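      // absorb the charge accumulated this step (V += Q/C), apply the leakage decay,
      // then reset the charge accumulator for the next step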
voltage += charge / capacitance;
voltage *= (1.f - leakage);
//node_pc[n].voltage[it] = voltage;
node_voltage[it] = voltage;
node_charge[it] = 0.f;
}//for: iterate nodess_per_piece
}//if
__syncthreads();
}
void cudaRun(node * node_pc, wire * wire_pc, unsigned char * transfer_buf, int nodes_per_pc, int wires_per_pc, int pieces_per_pe, int peid, int num_blocks, int num_threads) {
// GPU initialization
PRECISION * d_node_capacitance, * d_node_leakage, * d_node_charge, * d_node_voltage;
PRECISION * d_wire_currents, * d_wire_voltages, * d_wire_resistance, * d_wire_inductance, * d_wire_capacitance;
PRECISION * d_shr_voltage, * d_shr_charge;
int * d_in_ptr, * d_out_ptr, * d_shr_pc, * d_wire_attr;
#if 1
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
#if 0
for (int n = 0; n < pieces_per_pe; n++) {
for (int i = 0; i < wires_per_pc; i++) {
// circuit info
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", i, wire_pc[n].resistance[i], wire_pc[n].inductance[i], wire_pc[n].capacitance[i]);
printf("** node info **\n");
printf("in_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[(wire_pc[n].in_ptr[i])], node_pc[n].capacitance[(wire_pc[n].in_ptr[i])]);
printf("out_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[(wire_pc[n].out_ptr[i])], node_pc[n].capacitance[(wire_pc[n].out_ptr[i])]);
}
}
#endif
// GPU allocation
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_capacitance, sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_leakage , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_charge , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_voltage , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_currents , sizeof(PRECISION)*wires_per_pc*WIRE_SEGMENTS));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_voltages , sizeof(PRECISION)*wires_per_pc*(WIRE_SEGMENTS-1)));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_resistance , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_inductance , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_capacitance, sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_in_ptr, sizeof(int)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_out_ptr, sizeof(int)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_shr_voltage , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_shr_charge , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_shr_pc, sizeof(int)*wires_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_attr , sizeof(int)*wires_per_pc));
/* computation: calculate currents & distributed charge */
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, hipMemcpy( d_node_capacitance, node_pc[n].capacitance, sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_leakage , node_pc[n].leakage , sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_charge , node_pc[n].charge , sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_voltage , node_pc[n].voltage , sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
for (int i = 0; i < wires_per_pc; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, hipMemcpy( (d_wire_currents+coffset) , wire_pc[n].currents[i] , sizeof(PRECISION)*WIRE_SEGMENTS, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( (d_wire_voltages+voffset) , wire_pc[n].voltages[i] , sizeof(PRECISION)*(WIRE_SEGMENTS-1), hipMemcpyHostToDevice));
}
cudaCheckError( __LINE__, hipMemcpy( d_wire_resistance , wire_pc[n].resistance , sizeof(PRECISION)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_wire_inductance , wire_pc[n].inductance , sizeof(PRECISION)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_wire_capacitance, wire_pc[n].capacitance, sizeof(PRECISION)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_in_ptr , wire_pc[n].in_ptr , sizeof(int)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_out_ptr , wire_pc[n].out_ptr , sizeof(int)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_shr_voltage , wire_pc[n].shr_voltage, sizeof(PRECISION)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_shr_charge , wire_pc[n].shr_charge , sizeof(PRECISION)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_shr_pc , wire_pc[n].shr_pc , sizeof(int)*wires_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_wire_attr , wire_pc[n].wire_attr , sizeof(int)*wires_per_pc, hipMemcpyHostToDevice));
// <<<calculate currents>>> gpu
hipLaunchKernelGGL(( calculate_current_gpu), dim3(num_blocks), dim3(num_threads), 0, 0, wires_per_pc, d_wire_currents, d_wire_voltages, d_in_ptr, d_out_ptr, d_wire_inductance, d_wire_resistance, d_wire_capacitance, d_node_voltage, d_wire_attr, d_shr_voltage);
cudaCheckError( __LINE__, hipDeviceSynchronize());
// <<<distributed charge>>> gpu
hipLaunchKernelGGL(( distributed_charge_gpu), dim3(num_blocks), dim3(num_threads), 0, 0, wires_per_pc, d_wire_currents, d_in_ptr, d_out_ptr, d_node_charge, d_wire_attr, d_shr_charge);
cudaCheckError( __LINE__, hipDeviceSynchronize());
// GPU to CPU memcpy
cudaCheckError( __LINE__, hipMemcpy( node_pc[n].charge, d_node_charge , sizeof(PRECISION)*nodes_per_pc, hipMemcpyDeviceToHost));
cudaCheckError( __LINE__, hipMemcpy( wire_pc[n].shr_charge, d_shr_charge , sizeof(PRECISION)*wires_per_pc, hipMemcpyDeviceToHost));
for (int i = 0; i < wires_per_pc; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, hipMemcpy( wire_pc[n].currents[i], (d_wire_currents+coffset) , sizeof(PRECISION)*WIRE_SEGMENTS, hipMemcpyDeviceToHost));
cudaCheckError( __LINE__, hipMemcpy( wire_pc[n].voltages[i], (d_wire_voltages+voffset) , sizeof(PRECISION)*(WIRE_SEGMENTS-1), hipMemcpyDeviceToHost));
}// for wire_per_piece
}
#if 0
for (int n = 0; n < pieces_per_pe; n++) {
for (int i = 0; i < wires_per_pc; i++) {
// circuit info
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", i, wire_pc[n].resistance[i], wire_pc[n].inductance[i], wire_pc[n].capacitance[i]);
}
}
#endif
#if 0
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
/* post work for charge distribution */
int post_size = sizeof(int) + pieces_per_pe * sizeof(PRECISION) * (nodes_per_pc + wires_per_pc);
unsigned char * post_mem = transfer_buf;
int * index_init = reinterpret_cast<int *>(post_mem);
* index_init = peid;
PRECISION * data_init = reinterpret_cast<PRECISION *>(post_mem+sizeof(int));
// init transfer buffer
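  // buffer layout: [int peid][piece n: nodes_per_pc node charges, then wires_per_pc shared-wire charges] for n = 0..pieces_per_pe-1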
for (int n=0; n<pieces_per_pe; n++) {
#if 0
data_init = node_pc[n].charge;
data_init += nodes_per_pc * sizeof(PRECISION);
data_init = wire_pc[n].shr_charge;
data_init += wires_per_pc * sizeof(PRECISION);
#else
memcpy(data_init+n*(nodes_per_pc + wires_per_pc), node_pc[n].charge, nodes_per_pc*sizeof(PRECISION));
memcpy(data_init+n*(wires_per_pc+nodes_per_pc) + nodes_per_pc, wire_pc[n].shr_charge, wires_per_pc*sizeof(PRECISION));
#endif
}
#if 1
  // GPU deallocation
cudaCheckError( __LINE__, hipFree(d_node_capacitance));
cudaCheckError( __LINE__, hipFree(d_node_leakage));
cudaCheckError( __LINE__, hipFree(d_node_charge));
cudaCheckError( __LINE__, hipFree(d_node_voltage));
cudaCheckError( __LINE__, hipFree(d_wire_currents));
cudaCheckError( __LINE__, hipFree(d_wire_voltages));
cudaCheckError( __LINE__, hipFree(d_wire_resistance));
cudaCheckError( __LINE__, hipFree(d_wire_inductance));
cudaCheckError( __LINE__, hipFree(d_wire_capacitance));
cudaCheckError( __LINE__, hipFree(d_in_ptr));
cudaCheckError( __LINE__, hipFree(d_out_ptr));
cudaCheckError( __LINE__, hipFree(d_shr_voltage));
cudaCheckError( __LINE__, hipFree(d_shr_charge));
cudaCheckError( __LINE__, hipFree(d_shr_pc));
cudaCheckError( __LINE__, hipFree(d_wire_attr));
#endif
}
void cudaPost(node * node_pc, wire * wire_pc, unsigned char * result_buf, int nodes_per_pc, int wires_per_pc, int pieces_per_pe, int peid, int num_blocks, int num_threads) {
PRECISION * d_node_capacitance, * d_node_leakage, * d_node_charge, * d_node_voltage;
// GPU allocation
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_capacitance, sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_leakage , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_charge , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_voltage , sizeof(PRECISION)*nodes_per_pc));
#if 0
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
#if 1
for (int n = 0; n < pieces_per_pe; n++) {
for (int i = 0; i < nodes_per_pc; i++) {
// circuit info
printf( "Node %d charge: %f, voltage: %f, capacitance: %f, leakage: %f\n", i, node_pc[n].charge[i], node_pc[n].voltage[i], node_pc[n].capacitance[i], node_pc[n].leakage[i]);
}
}
#endif
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, hipMemcpy( d_node_capacitance, node_pc[n].capacitance, sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_leakage , node_pc[n].leakage , sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_charge , node_pc[n].charge , sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_voltage , node_pc[n].voltage , sizeof(PRECISION)*nodes_per_pc, hipMemcpyHostToDevice));
// update voltage gpu
hipLaunchKernelGGL(( update_voltage_gpu), dim3(num_blocks), dim3(num_threads), 0, 0, nodes_per_pc, d_node_voltage, d_node_charge, d_node_capacitance, d_node_leakage);
cudaCheckError( __LINE__, hipDeviceSynchronize());
// GPU to CPU memcpy
cudaCheckError( __LINE__, hipMemcpy( node_pc[n].charge, d_node_charge, sizeof(PRECISION)*nodes_per_pc, hipMemcpyDeviceToHost));
cudaCheckError( __LINE__, hipMemcpy( node_pc[n].voltage, d_node_voltage, sizeof(PRECISION)*nodes_per_pc, hipMemcpyDeviceToHost));
}
#if 0
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
/* result work for charge distribution */
int result_size = sizeof(int) + pieces_per_pe * sizeof(PRECISION) * nodes_per_pc * 2;
unsigned char * result_mem = result_buf;
int * index_init = reinterpret_cast<int *>(result_mem);
* index_init = peid;
result_mem += sizeof(int);
PRECISION * data_init = reinterpret_cast<PRECISION *>(result_mem);
// init transfer buffer
for (int n=0; n<pieces_per_pe; n++) {
memcpy(data_init + n*nodes_per_pc*2, node_pc[n].voltage, nodes_per_pc*sizeof(PRECISION));
memcpy(data_init + n*nodes_per_pc*2 + nodes_per_pc, node_pc[n].charge , nodes_per_pc*sizeof(PRECISION));
}
  // GPU deallocation
cudaCheckError( __LINE__, hipFree(d_node_capacitance));
cudaCheckError( __LINE__, hipFree(d_node_leakage));
cudaCheckError( __LINE__, hipFree(d_node_charge));
cudaCheckError( __LINE__, hipFree(d_node_voltage));
}
| 38dcd8451c4fe1a1d63f6b3c9a5528513eecc440.cu | #include "circuit.h"
#include <unistd.h>
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
using namespace std;
inline void cudaCheckError(int line, cudaError_t ce)
{
if (ce != cudaSuccess){
printf("Error: line %d %s\n", line, cudaGetErrorString(ce));
exit(1);
}
}
/**
* @ Kernel Function
*/
// calculate currents gpu
__global__ void calculate_current_gpu(int num_wires,
PRECISION * wire_currents, PRECISION * wire_voltages,
int * in_ptr, int * out_ptr,
PRECISION * wire_inductance, PRECISION * wire_resistance, PRECISION * wire_capacitance,
PRECISION * node_voltage, int * wire_attr,
PRECISION * shr_voltage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
PRECISION temp_v[WIRE_SEGMENTS+1];
PRECISION temp_i[WIRE_SEGMENTS];
PRECISION old_i[WIRE_SEGMENTS];
PRECISION old_v[WIRE_SEGMENTS-1];
for (int it=idx; it<num_wires; it+=gridsize) {
PRECISION dt = DELTAT;
PRECISION recip_dt = 1.0f / dt;
int steps = STEPS;
int currents_offset = it * WIRE_SEGMENTS;
int voltages_offset = it * (WIRE_SEGMENTS-1);
// calc temporary variables
for (int j = 0; j < WIRE_SEGMENTS; j++) {
temp_i[j] = wire_currents[currents_offset+j];
old_i[j] = temp_i[j];
}
for (int j = 0; j < (WIRE_SEGMENTS-1); j++) {
temp_v[j+1] = wire_voltages[voltages_offset+j];
old_v[j] = temp_v[j+1];
}
// calc outer voltages to the node voltages
temp_v[0] = node_voltage[in_ptr[it]];
// Note: out-ptr need communication when parallel
if (wire_attr[it] == 0)
temp_v[WIRE_SEGMENTS] = node_voltage[out_ptr[it]];
else
temp_v[WIRE_SEGMENTS] = shr_voltage[it];
// Solve the RLC model iteratively
PRECISION inductance = wire_inductance[it];
PRECISION recip_resistance = 1.0f / (wire_resistance[it]);
PRECISION recip_capacitance = 1.0f / (wire_capacitance[it]);
for (int j = 0; j < steps; j++) {
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
for (int k = 0; k < WIRE_SEGMENTS; k++) {
temp_i[k] = ((temp_v[k+1] - temp_v[k]) - (inductance * (temp_i[k] - old_i[k]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
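        // charge conservation at each internal node: C*dV/dt = I_in - I_out,
        // i.e. V_k = V_k_old + dt*(I_k - I_{k+1})/C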
for (int k = 0; k < (WIRE_SEGMENTS-1); k++) {
temp_v[k+1] = old_v[k] + dt * (temp_i[k] - temp_i[k+1]) * recip_capacitance;
}
}
// Write out the results
for (int j = 0; j < WIRE_SEGMENTS; j++)
wire_currents[currents_offset+j] = temp_i[j];
for (int j = 0; j < (WIRE_SEGMENTS-1); j++)
wire_voltages[voltages_offset+j] = temp_v[j+1];
}// for: wires
}// if
__syncthreads();
}// calc_end
// distributed charge gpu
__global__ void distributed_charge_gpu(int num_wires,
PRECISION * wire_currents,
int * in_ptr, int * out_ptr,
PRECISION * node_charge, int * wire_attr,
PRECISION * shr_charge) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
for (int it = idx; it < num_wires; it+=gridsize) {
int currents_offset = it * WIRE_SEGMENTS;
// calc temporary variables
PRECISION dt = DELTAT;
PRECISION in_current = -dt * (wire_currents[currents_offset]);
PRECISION out_current = -dt * (wire_currents[currents_offset+WIRE_SEGMENTS-1]);
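      // the end-segment currents deposit charge -I*dt on the wire's terminal nodes;
      // atomicAdd is needed because several wires can share the same node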
//node_charge[in_ptr[it]] += in_current;
atomicAdd(&node_charge[in_ptr[it]], in_current);
//node_charge[out_ptr[it]] += out_current;
if (wire_attr[it] == 0)
atomicAdd(&node_charge[out_ptr[it]], out_current);
else
atomicAdd(&shr_charge[it], out_current);
}//for: iterate wires_per_pc
}// if
__syncthreads();
}// dc end
// update voltage gpu
__global__ void update_voltage_gpu( int num_nodes,
PRECISION * node_voltage, PRECISION * node_charge,
PRECISION * node_capacitance, PRECISION * node_leakage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_nodes) {
for (int it = idx; it < num_nodes; it+=gridsize) {
PRECISION voltage = node_voltage[it];
PRECISION charge = node_charge[it];
PRECISION capacitance = node_capacitance[it];
PRECISION leakage = node_leakage[it];
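      // absorb the charge accumulated this step (V += Q/C), apply the leakage decay,
      // then reset the charge accumulator for the next step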
voltage += charge / capacitance;
voltage *= (1.f - leakage);
//node_pc[n].voltage[it] = voltage;
node_voltage[it] = voltage;
node_charge[it] = 0.f;
}//for: iterate nodess_per_piece
}//if
__syncthreads();
}
void cudaRun(node * node_pc, wire * wire_pc, unsigned char * transfer_buf, int nodes_per_pc, int wires_per_pc, int pieces_per_pe, int peid, int num_blocks, int num_threads) {
// GPU initialization
PRECISION * d_node_capacitance, * d_node_leakage, * d_node_charge, * d_node_voltage;
PRECISION * d_wire_currents, * d_wire_voltages, * d_wire_resistance, * d_wire_inductance, * d_wire_capacitance;
PRECISION * d_shr_voltage, * d_shr_charge;
int * d_in_ptr, * d_out_ptr, * d_shr_pc, * d_wire_attr;
#if 1
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
#if 0
for (int n = 0; n < pieces_per_pe; n++) {
for (int i = 0; i < wires_per_pc; i++) {
// circuit info
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", i, wire_pc[n].resistance[i], wire_pc[n].inductance[i], wire_pc[n].capacitance[i]);
printf("** node info **\n");
printf("in_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[(wire_pc[n].in_ptr[i])], node_pc[n].capacitance[(wire_pc[n].in_ptr[i])]);
printf("out_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[(wire_pc[n].out_ptr[i])], node_pc[n].capacitance[(wire_pc[n].out_ptr[i])]);
}
}
#endif
// GPU allocation
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_capacitance, sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_leakage , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_charge , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_voltage , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_currents , sizeof(PRECISION)*wires_per_pc*WIRE_SEGMENTS));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_voltages , sizeof(PRECISION)*wires_per_pc*(WIRE_SEGMENTS-1)));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_resistance , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_inductance , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_capacitance, sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_in_ptr, sizeof(int)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_out_ptr, sizeof(int)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_shr_voltage , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_shr_charge , sizeof(PRECISION)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_shr_pc, sizeof(int)*wires_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_attr , sizeof(int)*wires_per_pc));
/* computation: calculate currents & distributed charge */
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( d_node_capacitance, node_pc[n].capacitance, sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_leakage , node_pc[n].leakage , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_charge , node_pc[n].charge , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_voltage , node_pc[n].voltage , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
for (int i = 0; i < wires_per_pc; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, cudaMemcpy( (d_wire_currents+coffset) , wire_pc[n].currents[i] , sizeof(PRECISION)*WIRE_SEGMENTS, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( (d_wire_voltages+voffset) , wire_pc[n].voltages[i] , sizeof(PRECISION)*(WIRE_SEGMENTS-1), cudaMemcpyHostToDevice));
}
cudaCheckError( __LINE__, cudaMemcpy( d_wire_resistance , wire_pc[n].resistance , sizeof(PRECISION)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_wire_inductance , wire_pc[n].inductance , sizeof(PRECISION)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_wire_capacitance, wire_pc[n].capacitance, sizeof(PRECISION)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_in_ptr , wire_pc[n].in_ptr , sizeof(int)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_out_ptr , wire_pc[n].out_ptr , sizeof(int)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_shr_voltage , wire_pc[n].shr_voltage, sizeof(PRECISION)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_shr_charge , wire_pc[n].shr_charge , sizeof(PRECISION)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_shr_pc , wire_pc[n].shr_pc , sizeof(int)*wires_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_wire_attr , wire_pc[n].wire_attr , sizeof(int)*wires_per_pc, cudaMemcpyHostToDevice));
// <<<calculate currents>>> gpu
calculate_current_gpu<<<num_blocks, num_threads>>>(wires_per_pc, d_wire_currents, d_wire_voltages, d_in_ptr, d_out_ptr, d_wire_inductance, d_wire_resistance, d_wire_capacitance, d_node_voltage, d_wire_attr, d_shr_voltage);
cudaCheckError( __LINE__, cudaDeviceSynchronize());
// <<<distributed charge>>> gpu
distributed_charge_gpu<<<num_blocks, num_threads>>>(wires_per_pc, d_wire_currents, d_in_ptr, d_out_ptr, d_node_charge, d_wire_attr, d_shr_charge);
cudaCheckError( __LINE__, cudaDeviceSynchronize());
// GPU to CPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( node_pc[n].charge, d_node_charge , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyDeviceToHost));
cudaCheckError( __LINE__, cudaMemcpy( wire_pc[n].shr_charge, d_shr_charge , sizeof(PRECISION)*wires_per_pc, cudaMemcpyDeviceToHost));
for (int i = 0; i < wires_per_pc; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, cudaMemcpy( wire_pc[n].currents[i], (d_wire_currents+coffset) , sizeof(PRECISION)*WIRE_SEGMENTS, cudaMemcpyDeviceToHost));
cudaCheckError( __LINE__, cudaMemcpy( wire_pc[n].voltages[i], (d_wire_voltages+voffset) , sizeof(PRECISION)*(WIRE_SEGMENTS-1), cudaMemcpyDeviceToHost));
}// for wire_per_piece
}
#if 0
for (int n = 0; n < pieces_per_pe; n++) {
for (int i = 0; i < wires_per_pc; i++) {
// circuit info
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", i, wire_pc[n].resistance[i], wire_pc[n].inductance[i], wire_pc[n].capacitance[i]);
}
}
#endif
#if 0
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
/* post work for charge distribution */
int post_size = sizeof(int) + pieces_per_pe * sizeof(PRECISION) * (nodes_per_pc + wires_per_pc);
unsigned char * post_mem = transfer_buf;
int * index_init = reinterpret_cast<int *>(post_mem);
* index_init = peid;
PRECISION * data_init = reinterpret_cast<PRECISION *>(post_mem+sizeof(int));
// init transfer buffer
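  // buffer layout: [int peid][piece n: nodes_per_pc node charges, then wires_per_pc shared-wire charges] for n = 0..pieces_per_pe-1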
for (int n=0; n<pieces_per_pe; n++) {
#if 0
data_init = node_pc[n].charge;
data_init += nodes_per_pc * sizeof(PRECISION);
data_init = wire_pc[n].shr_charge;
data_init += wires_per_pc * sizeof(PRECISION);
#else
memcpy(data_init+n*(nodes_per_pc + wires_per_pc), node_pc[n].charge, nodes_per_pc*sizeof(PRECISION));
memcpy(data_init+n*(wires_per_pc+nodes_per_pc) + nodes_per_pc, wire_pc[n].shr_charge, wires_per_pc*sizeof(PRECISION));
#endif
}
#if 1
  // GPU deallocation
cudaCheckError( __LINE__, cudaFree(d_node_capacitance));
cudaCheckError( __LINE__, cudaFree(d_node_leakage));
cudaCheckError( __LINE__, cudaFree(d_node_charge));
cudaCheckError( __LINE__, cudaFree(d_node_voltage));
cudaCheckError( __LINE__, cudaFree(d_wire_currents));
cudaCheckError( __LINE__, cudaFree(d_wire_voltages));
cudaCheckError( __LINE__, cudaFree(d_wire_resistance));
cudaCheckError( __LINE__, cudaFree(d_wire_inductance));
cudaCheckError( __LINE__, cudaFree(d_wire_capacitance));
cudaCheckError( __LINE__, cudaFree(d_in_ptr));
cudaCheckError( __LINE__, cudaFree(d_out_ptr));
cudaCheckError( __LINE__, cudaFree(d_shr_voltage));
cudaCheckError( __LINE__, cudaFree(d_shr_charge));
cudaCheckError( __LINE__, cudaFree(d_shr_pc));
cudaCheckError( __LINE__, cudaFree(d_wire_attr));
#endif
}
void cudaPost(node * node_pc, wire * wire_pc, unsigned char * result_buf, int nodes_per_pc, int wires_per_pc, int pieces_per_pe, int peid, int num_blocks, int num_threads) {
PRECISION * d_node_capacitance, * d_node_leakage, * d_node_charge, * d_node_voltage;
// GPU allocation
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_capacitance, sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_leakage , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_charge , sizeof(PRECISION)*nodes_per_pc));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_voltage , sizeof(PRECISION)*nodes_per_pc));
#if 0
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
#if 1
for (int n = 0; n < pieces_per_pe; n++) {
for (int i = 0; i < nodes_per_pc; i++) {
// circuit info
printf( "Node %d charge: %f, voltage: %f, capacitance: %f, leakage: %f\n", i, node_pc[n].charge[i], node_pc[n].voltage[i], node_pc[n].capacitance[i], node_pc[n].leakage[i]);
}
}
#endif
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( d_node_capacitance, node_pc[n].capacitance, sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_leakage , node_pc[n].leakage , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_charge , node_pc[n].charge , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_voltage , node_pc[n].voltage , sizeof(PRECISION)*nodes_per_pc, cudaMemcpyHostToDevice));
// update voltage gpu
update_voltage_gpu<<<num_blocks, num_threads>>>(nodes_per_pc, d_node_voltage, d_node_charge, d_node_capacitance, d_node_leakage);
cudaCheckError( __LINE__, cudaDeviceSynchronize());
// GPU to CPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( node_pc[n].charge, d_node_charge, sizeof(PRECISION)*nodes_per_pc, cudaMemcpyDeviceToHost));
cudaCheckError( __LINE__, cudaMemcpy( node_pc[n].voltage, d_node_voltage, sizeof(PRECISION)*nodes_per_pc, cudaMemcpyDeviceToHost));
}
#if 0
for (int n=0; n<pieces_per_pe; n++) {
for (int it=0; it<nodes_per_pc; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
}
}
#endif
/* result work for charge distribution */
int result_size = sizeof(int) + pieces_per_pe * sizeof(PRECISION) * nodes_per_pc * 2;
unsigned char * result_mem = result_buf;
int * index_init = reinterpret_cast<int *>(result_mem);
* index_init = peid;
result_mem += sizeof(int);
PRECISION * data_init = reinterpret_cast<PRECISION *>(result_mem);
// init transfer buffer
for (int n=0; n<pieces_per_pe; n++) {
memcpy(data_init + n*nodes_per_pc*2, node_pc[n].voltage, nodes_per_pc*sizeof(PRECISION));
memcpy(data_init + n*nodes_per_pc*2 + nodes_per_pc, node_pc[n].charge , nodes_per_pc*sizeof(PRECISION));
}
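/* Sketch of the result_buf layout implied by the copies above (derived from the code):
     [ int peid ][ piece 0: voltages, charges ][ piece 1: voltages, charges ] ...
   which is result_size = sizeof(int) + pieces_per_pe*sizeof(PRECISION)*nodes_per_pc*2 bytes. */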
// GPU deallocation
cudaCheckError( __LINE__, cudaFree(d_node_capacitance));
cudaCheckError( __LINE__, cudaFree(d_node_leakage));
cudaCheckError( __LINE__, cudaFree(d_node_charge));
cudaCheckError( __LINE__, cudaFree(d_node_voltage));
}
|
563e35026ee2b2a90d51347ab94f9f67cc3174a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void gSum_vec(float* v1, float* v2, int N){
int i=threadIdx.x+blockIdx.x*blockDim.x;
if(i<N) // guard: the grid is rounded up, so threads past N must not write out of bounds
  v1[i]+=v2[i]+1.0;
}
void Sum_vec(float* v1, float* v2, float *w, int N){
float *u1,*u2;
hipMalloc((void **) &u1, N*sizeof(float));
hipMalloc((void **) &u2, N*sizeof(float));
hipMemcpy(u1, v1, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(u2, v2, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gSum_vec), dim3(dim3(N/512+((N%512)?1:0))),dim3(dim3(512)), 0, 0, u1,u2,N);
hipDeviceSynchronize();
hipMemcpy(w, u1, N*sizeof(float), hipMemcpyDeviceToHost);
hipFree(u1);
hipFree(u2);
}
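/* Minimal usage sketch (hypothetical host code, not part of the original file):
     float a[1024], b[1024], c[1024];
     // ... fill a and b ...
     Sum_vec(a, b, c, 1024);   // afterwards c[i] == a[i] + b[i] + 1.0f
*/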
| 563e35026ee2b2a90d51347ab94f9f67cc3174a6.cu | __global__ void gSum_vec(float* v1, float* v2, int N){
int i=threadIdx.x+blockIdx.x*blockDim.x;
if(i<N) // guard: the grid is rounded up, so threads past N must not write out of bounds
  v1[i]+=v2[i]+1.0;
}
void Sum_vec(float* v1, float* v2, float *w, int N){
float *u1,*u2;
cudaMalloc((void **) &u1, N*sizeof(float));
cudaMalloc((void **) &u2, N*sizeof(float));
cudaMemcpy(u1, v1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(u2, v2, N*sizeof(float), cudaMemcpyHostToDevice);
gSum_vec<<<dim3(N/512+((N%512)?1:0)),dim3(512)>>>(u1,u2,N);
cudaDeviceSynchronize();
cudaMemcpy(w, u1, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(u1);
cudaFree(u2);
}
|
e6458a38e60239273b8c05413ee23dda88cb6696.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include <hip/hip_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "mixed_precision_scale.h"
namespace onnxruntime {
namespace cuda {
template <typename SrcT, typename DstT>
__global__ void _MixedPrecisionScale(
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = static_cast<DstT>(*scale_data * static_cast<float>(input_data[id]));
}
template <typename SrcT, typename DstT>
void Impl_MixedPrecisionScale(
hipStream_t stream,
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
size_t count){
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
hipLaunchKernelGGL(( _MixedPrecisionScale<SrcT, DstT>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
input_data,
scale_data,
output_data,
N);
}
#define SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(SrcT, DstT) \
template void Impl_MixedPrecisionScale<SrcT, DstT>( \
hipStream_t stream, \
const SrcT* input_data, \
const float* scale_data, \
DstT* output_data, \
size_t count);
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, BFloat16)
} // namespace cuda
} // namespace onnxruntime
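/* Usage sketch (hypothetical call site; `stream`, `in`, `scale` and `out` are assumed to be a
   valid hipStream_t and device pointers of matching types):
     Impl_MixedPrecisionScale<half, float>(stream, in, scale, out, count);
   which computes out[i] = (*scale) * (float)in[i] for every i in [0, count). */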
| e6458a38e60239273b8c05413ee23dda88cb6696.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "mixed_precision_scale.h"
namespace onnxruntime {
namespace cuda {
template <typename SrcT, typename DstT>
__global__ void _MixedPrecisionScale(
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = static_cast<DstT>(*scale_data * static_cast<float>(input_data[id]));
}
template <typename SrcT, typename DstT>
void Impl_MixedPrecisionScale(
cudaStream_t stream,
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
size_t count){
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
_MixedPrecisionScale<SrcT, DstT><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
scale_data,
output_data,
N);
}
#define SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(SrcT, DstT) \
template void Impl_MixedPrecisionScale<SrcT, DstT>( \
cudaStream_t stream, \
const SrcT* input_data, \
const float* scale_data, \
DstT* output_data, \
size_t count);
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, BFloat16)
} // namespace cuda
} // namespace onnxruntime
|
09cac2e978dc030124850d04d619211d219c0b79.hip | // !!! This is a file automatically generated by hipify!!!
#include "common/book.h"
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(void) {
hipDeviceProp_t prop;
int count;
HANDLE_ERROR(hipGetDeviceCount(&count));
for (int i = 0; i < count; ++i) {
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
printf(" --- General information for device %d ---\n", i);
printf("Name: %s\n", prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Device copy overlap: ");
if (prop.deviceOverlap) {
printf("Enabled\n");
} else {
printf("Disabled\n");
}
printf("Kernel execition timeout: ");
if (prop.kernelExecTimeoutEnabled) {
printf("Enabled\n");
} else {
printf("Disabled\n");
}
printf(" --- Memory information for device %d ---\n", i);
printf("Total global mem: %ld\n", prop.totalGlobalMem);
printf("Total constant mem: %ld\n", prop.totalConstMem);
printf("Max mem pitch: %ld\n", prop.memPitch);
printf("Texture alignment: %ld\n", prop.textureAlignment);
printf(" --- MP Information for device %d ---\n", i);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock);
printf("Registers per mp: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
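/* Note: clockRate is reported in kHz and the memory fields (totalGlobalMem, totalConstMem,
   memPitch) are byte counts, which is why they are printed with %ld. */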
| 09cac2e978dc030124850d04d619211d219c0b79.cu | #include "common/book.h"
#include <cuda.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime.h>
#include <stdio.h>
int main(void) {
cudaDeviceProp prop;
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
for (int i = 0; i < count; ++i) {
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
printf(" --- General information for device %d ---\n", i);
printf("Name: %s\n", prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Device copy overlap: ");
if (prop.deviceOverlap) {
printf("Enabled\n");
} else {
printf("Disabled\n");
}
printf("Kernel execition timeout: ");
if (prop.kernelExecTimeoutEnabled) {
printf("Enabled\n");
} else {
printf("Disabled\n");
}
printf(" --- Memory information for device %d ---\n", i);
printf("Total global mem: %ld\n", prop.totalGlobalMem);
printf("Total constant mem: %ld\n", prop.totalConstMem);
printf("Max mem pitch: %ld\n", prop.memPitch);
printf("Texture alignment: %ld\n", prop.textureAlignment);
printf(" --- MP Information for device %d ---\n", i);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock);
printf("Registers per mp: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
|
579dc2c3555d3ca11a56135b3a562104613a8dc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrixfunctions.h"
#include <stdio.h>
int updateDiag(double *,int,double *,int,double ,int,int);
int updateMatrix(double *,int,double*,int,double,int,int);
void matrixPol(double *,int ,double *,int,int,int,double*,int);
void initializeZero(double*,int,int,int);
void matrixAdd(double*,int,double*,int,double*,int,int,int);
void naiveMatrixPol(double*,int,double*,int,int,int,double*,int);
__global__
void setValue(double* dev_vec,int value , int size){
/* device kernel that takes for input a vector and an integer and set all cells of vector to this integer */
int tid = (gridDim.y*blockIdx.x+blockIdx.y)*blockDim.x*blockDim.y+blockDim.y*threadIdx.x+threadIdx.y;
if (tid < size) {
dev_vec[tid] = value;
}
}
void matrixAdd(double* C,int incC,double* A,int incA,double* B,int incB,int rows,int cols){
/* This function implements the matrix addition in multiple gpus.
Every thread (device) is responsible for the outcome of a number of rows of matrix C.
eg.
| a11 a12 a13 .... a1N | <--- Gpu 1
| a21 a22 a23 .... a2N |
| a31 a32 a33 .... a3N | <--- Gpu 2
| a41 a42 a43 .... a4N |
| . . . .... . | etc
| aM1 aM2 aM3 .... aMN |
*/
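// Worked example of the row split above: with rows = 10 and deviceCount = 4, each GPU gets
// sizeRows = 10/4 = 2 rows, and the last GPU also takes the remainder, 2 + 10%4 = 4 rows.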
int tmpDeviceCount = deviceCount;
if(rows<deviceCount) deviceCount =1;
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
double *dev_A,*dev_B,*dev_C;
double one = 1.0;
int numThread = omp_get_thread_num();
hipSetDevice(numThread);
int sizeRows = rows / deviceCount;
int offsetA = numThread*sizeRows*incA;
int offsetB = numThread*sizeRows*incB;
int offsetC = numThread*sizeRows*incC;
if(numThread == deviceCount -1 ) sizeRows += rows%deviceCount;
hipMalloc((void**)&dev_A,sizeRows*cols*sizeof(double));
hipMalloc((void**)&dev_B,sizeRows*cols*sizeof(double));
hipMalloc((void**)&dev_C,sizeRows*cols*sizeof(double));
hipblasSetMatrix(cols,sizeRows,sizeof(double),&A[offsetA],incA,dev_A,cols);
hipblasSetMatrix(cols,sizeRows,sizeof(double),&B[offsetB],incB,dev_B,cols);
hipblasDgeam(cublasHandler[numThread],HIPBLAS_OP_N,HIPBLAS_OP_N,cols,sizeRows,&one,dev_A,cols,&one,dev_B,cols,dev_C,cols);
hipblasGetMatrix(cols,sizeRows,sizeof(double),dev_C,cols,&C[offsetC],incC);
//printf("1\n");
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_C);
}
deviceCount = tmpDeviceCount;
}
int updateDiag(double *outputMatrix,int incOutput,double *inputMatrix,int incInput,double alpha,int rows,int cols){
/* This function implements
B = aI + A
The idea goes like this
set a vector v to ones
multiply v with a and add it to the diag of the input matrix
last step is implemented using hipblasDaxpy (y = y +a*x where y,x are vectors and a is scalar)
Every thread (device) is responsible for a block of output vector.
eg.
| v1 | <-- Gpu 1
| v2 |
| v3 | <-- Gpu 2
| v4 |
| . | etc
| vN |
*/
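// Note on the stride used below: element (i,i) of a matrix stored with leading dimension inc
// sits at linear offset i*(inc+1), so reading/writing with stride incInput+1 / incOutput+1
// walks exactly along the diagonal.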
int tmpDeviceCount = deviceCount;
if(rows < deviceCount) deviceCount =1;
int *errorStatus = (int*)malloc(deviceCount*sizeof(int));
for (int e = 0; e < deviceCount; e++) errorStatus[e] = OK; // initialize, otherwise the final check reads uninitialized memory
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
int numThread = omp_get_thread_num();
hipSetDevice(numThread);
int sizeDiag = rows / deviceCount;
int offsetIn = numThread*sizeDiag*(incInput+1);
int offsetOut = numThread*sizeDiag*(incOutput+1);
if(numThread == deviceCount -1 ) sizeDiag += rows % deviceCount;
double *dev_input,*dev_output;
hipMalloc((void**)&dev_output,sizeDiag*sizeof(double));
hipMalloc((void**)&dev_input,sizeDiag*sizeof(double));
if(hipblasSetVector(sizeDiag,sizeof(double),&inputMatrix[offsetIn],incInput+1,dev_input,1)!=HIPBLAS_STATUS_SUCCESS)
errorStatus[numThread] = ERROR;
int grid = ceil((sizeDiag)/1024);
if(grid<=0)grid=1;
int BLOCK_SIZE=512;
int number_of_blocks = ((sizeDiag) + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 gridDim(number_of_blocks, 1);
dim3 blockDim(BLOCK_SIZE, 1);
hipLaunchKernelGGL(( setValue), dim3(gridDim), dim3(blockDim), 0, 0, dev_output,1,sizeDiag);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipblasDaxpy(cublasHandler[numThread],sizeDiag,&alpha,dev_output,1,dev_input,1);
if(hipblasGetVector(sizeDiag,sizeof(double),dev_input,1,&outputMatrix[offsetOut],incOutput+1)!=HIPBLAS_STATUS_SUCCESS)
errorStatus[numThread] = ERROR;
hipFree(dev_input);
hipFree(dev_output);
}
deviceCount = tmpDeviceCount;
int index;
for(index=0;index<deviceCount;index++){
if(errorStatus[index] != OK ) return ERROR;
}
return OK;
}
int updateMatrix(double *outMatrix,int incOut,double *inMatrix,int incIn,double alpha,int rows,int cols){
/* This function implements the update of a matrix
B = a*A;
Core of this function is the hipblasDscal that does this exact operation.
The blocking of the input matrix goes like this
Every thread (device) gets a number of rows that is responsible for the outcome
eg
| a11 a12 a13 .... a1N | <--- Gpu 1
| a21 a22 a23 .... a2N |
| a31 a32 a33 .... a3N | <--- Gpu 2
| a41 a42 a43 .... a4N |
| . . . .... . | etc
| aM1 aM2 aM3 .... aMN |
*/
int tmpDeviceCount = deviceCount;
if(rows < deviceCount) deviceCount =1;
int *errorStatus = (int*)malloc(deviceCount*sizeof(int));
for (int e = 0; e < deviceCount; e++) errorStatus[e] = OK; // initialize, otherwise the final check reads uninitialized memory
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
int numThread = omp_get_thread_num();
hipSetDevice(numThread);
int sizeRows = rows / deviceCount;
int offsetIn = numThread*sizeRows*incIn;
int offsetOut = numThread*sizeRows*incOut;
if(numThread == deviceCount-1) sizeRows += rows%deviceCount;
double *dev_inMatrix;
hipMalloc((void**)&dev_inMatrix,sizeRows*cols*sizeof(double));
hipblasSetMatrix(cols,sizeRows,sizeof(double),&inMatrix[offsetIn],incIn,dev_inMatrix,cols);
hipblasDscal(cublasHandler[numThread],sizeRows*cols,&alpha,dev_inMatrix,1);
hipblasGetMatrix(cols,sizeRows,sizeof(double),dev_inMatrix,cols,(void*)&outMatrix[offsetOut],incOut);
hipFree(dev_inMatrix);
}
deviceCount = tmpDeviceCount;
int index;
for(index=0;index<deviceCount;index++){
if(errorStatus[index] != OK ) return ERROR;
}
return OK;
}
int divisor(int number){
/* Just a nothing-to-say-about-function passing by */
int i;
for (i = number / 2; i >= 1; i--)
{
if (number % i == 0)
{
break;
}
}
return i;
}
void matrixMul(double* C,int ldc,double* A,int lda,double* B,int ldb,int m,int k,int n){
/* input A,B
output C
*/
/* This function implements the block matrix multiplication
Given a number, divisor() returns a proper divisor b of it, so that b*c equals that number.
So, from deviceCount we get b and c, and those two numbers define how many blocks of rows
and how many blocks of columns the output matrix is split into.
After that every thread (device) takes one output block and the input blocks it needs and
eventually calls the GPU_strassen function to do that block's multiplication.
*/
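// Worked example of the blocking above: with deviceCount = 4, divisor(4) = 2, so numA = 2 and
// numB = 2; C is split into a 2x2 grid of blocks of roughly (m/2) x (n/2), one per GPU.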
int numA,numB;
int stepA,stepB;
numA = divisor(deviceCount);
if(numA==0)
numA=1;
numB = deviceCount/numA;
stepA = m/numA; // Number of rows per block
stepB = n/numB; // Number of columns per block
int i,j; // for counters
int tmpDeviceCount=deviceCount;
if(deviceCount > m || deviceCount > n) deviceCount = 1;
omp_set_num_threads(deviceCount);
#pragma omp parallel for collapse(2)
for(i=0;i<numA;i++){
for(j=0;j<numB;j++){
unsigned int numThread = omp_get_thread_num();
if(hipSetDevice(numThread)!=hipSuccess){
printf("ERROR");
//exit;
}
if(i==numA-1 && j==numB-1){
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA+m % numA,k,stepA+m % numA,k,stepB+n % numB,stepB+n % numB,1);
}else if(i==numA-1 && j!=numB-1){
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA+m%numA,k,m%numA+stepA,k,stepB,stepB,1);
}else if(i!=numA-1 && j==numB-1){
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA,k,stepA,k,stepB+n%numB,stepB+n%numB,1);
} else {
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA,k,stepA,k,stepB,stepB,1);
}
}
}
deviceCount = tmpDeviceCount;
}
void initializeZero(double *X,int incX,int rows,int cols){
/* This function given an input matrix X, returns matrix X with all values set to 0.
Core of this function is the function hipMemset()
*/
int tmpDeviceCount = deviceCount;
if(rows<deviceCount) deviceCount = 1;
//deviceCount=1;
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
int numThread = omp_get_thread_num();
hipSetDevice(numThread);
int sizeRows = rows / deviceCount;
int offsetX = numThread*sizeRows*incX;
if(numThread == deviceCount-1) sizeRows += rows % deviceCount;
double *dev_X;
hipMalloc((void**)&dev_X,sizeRows*cols*sizeof(double));
hipMemset(dev_X,0,sizeRows*cols*sizeof(double));
hipblasGetMatrix(cols,sizeRows,sizeof(double),dev_X,cols,&X[offsetX],incX);
hipFree(dev_X);
}
deviceCount = tmpDeviceCount;
}
void matrixPol(double *B,int incB,double *A,int incA,int rows,int cols,double* coef,int coefNum){
/**** B = f(A,coef); ****/
/* This function is responsible for the computational flow of the polynomial
Inside this function 2 additional matrices are used
Step1: Calculate A^2
Step2: Calculate the first one or two orders of the polynomial, so that the loop below is always left with pairs of orders
Step3: Compute the rest of the polynomial inside the loop
*/
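// Worked example for coefNum = 4 (so coef[k] multiplies A^k):
//   even branch:   B = coef[2]*I + coef[3]*A
//   one loop pass: B = A^2*B + coef[0]*I + coef[1]*A
//                    = coef[0]*I + coef[1]*A + coef[2]*A^2 + coef[3]*A^3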
double *tmpMatrix = (double*)malloc(rows*cols*sizeof(double));
double *A_2 = (double*)malloc(rows*cols*sizeof(double));
matrixMul(A_2,cols,A,incA,A,incA,rows,cols,cols); /* A_2 = A*A */
int loopStart;
if( (coefNum % 2) == 0 ) {
/* if polynomial order is even compute the aI + bX */
updateMatrix(B,incB,A,incA,coef[coefNum-1],rows,cols);
updateDiag(B,incB,B,incB,coef[coefNum-2],rows,cols);
loopStart=coefNum-3;
}else{
/* if polynomial order is odd compute the aI */
initializeZero(tmpMatrix,cols,rows,cols);
updateDiag(B,incB,tmpMatrix,cols,coef[coefNum-1],rows,cols);
loopStart=coefNum-2;
}
int i;
for(i =loopStart;i>=0;i=i-2){
/*Rest of the polynomial orders are computed here */
matrixMul(B,incB,A_2,cols,B,incB,rows,cols,cols); /*B = X_2*B */
updateMatrix(tmpMatrix,cols,A,incA,coef[i],rows,cols);/* a*X */
updateDiag(tmpMatrix,cols,tmpMatrix,cols,coef[i-1],rows,cols); /* b*I+a*X */
matrixAdd(B,incB,B,incB,tmpMatrix,cols,rows,cols); /* B =B + b*I+a*X */
}
free(tmpMatrix);
free(A_2);
}
| 579dc2c3555d3ca11a56135b3a562104613a8dc3.cu | #include "matrixfunctions.h"
#include <stdio.h>
int updateDiag(double *,int,double *,int,double ,int,int);
int updateMatrix(double *,int,double*,int,double,int,int);
void matrixPol(double *,int ,double *,int,int,int,double*,int);
void initializeZero(double*,int,int,int);
void matrixAdd(double*,int,double*,int,double*,int,int,int);
void naiveMatrixPol(double*,int,double*,int,int,int,double*,int);
__global__
void setValue(double* dev_vec,int value , int size){
/* device kernel that takes for input a vector and an integer and set all cells of vector to this integer */
int tid = (gridDim.y*blockIdx.x+blockIdx.y)*blockDim.x*blockDim.y+blockDim.y*threadIdx.x+threadIdx.y;
if (tid < size) {
dev_vec[tid] = value;
}
}
void matrixAdd(double* C,int incC,double* A,int incA,double* B,int incB,int rows,int cols){
/* This function implements the matrix addition in multiple gpus.
Every thread (device) is responsible for the outcome of a number of rows of matrix C.
eg.
| a11 a12 a13 .... a1N | <--- Gpu 1
| a21 a22 a23 .... a2N |
| a31 a32 a33 .... a3N | <--- Gpu 2
| a41 a42 a43 .... a4N |
| . . . .... . | etc
| aM1 aM2 aM3 .... aMN |
*/
int tmpDeviceCount = deviceCount;
if(rows<deviceCount) deviceCount =1;
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
double *dev_A,*dev_B,*dev_C;
double one = 1.0;
int numThread = omp_get_thread_num();
cudaSetDevice(numThread);
int sizeRows = rows / deviceCount;
int offsetA = numThread*sizeRows*incA;
int offsetB = numThread*sizeRows*incB;
int offsetC = numThread*sizeRows*incC;
if(numThread == deviceCount -1 ) sizeRows += rows%deviceCount;
cudaMalloc((void**)&dev_A,sizeRows*cols*sizeof(double));
cudaMalloc((void**)&dev_B,sizeRows*cols*sizeof(double));
cudaMalloc((void**)&dev_C,sizeRows*cols*sizeof(double));
cublasSetMatrix(cols,sizeRows,sizeof(double),&A[offsetA],incA,dev_A,cols);
cublasSetMatrix(cols,sizeRows,sizeof(double),&B[offsetB],incB,dev_B,cols);
cublasDgeam(cublasHandler[numThread],CUBLAS_OP_N,CUBLAS_OP_N,cols,sizeRows,&one,dev_A,cols,&one,dev_B,cols,dev_C,cols);
cublasGetMatrix(cols,sizeRows,sizeof(double),dev_C,cols,&C[offsetC],incC);
//printf("1\n");
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
}
deviceCount = tmpDeviceCount;
}
int updateDiag(double *outputMatrix,int incOutput,double *inputMatrix,int incInput,double alpha,int rows,int cols){
/* This function implements
B = aI + A
The idea goes like this
set a vector v to ones
multiply v with a and add it to the diag of the input matrix
last step is implemented using cublasDaxpy (y = y +a*x where y,x are vectors and a is scalar)
Every thread (device) is responsible for a block of output vector.
eg.
| v1 | <-- Gpu 1
| v2 |
| v3 | <-- Gpu 2
| v4 |
| . | etc
| vN |
*/
int tmpDeviceCount = deviceCount;
if(rows < deviceCount) deviceCount =1;
int *errorStatus = (int*)malloc(deviceCount*sizeof(int));
for (int e = 0; e < deviceCount; e++) errorStatus[e] = OK; // initialize, otherwise the final check reads uninitialized memory
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
int numThread = omp_get_thread_num();
cudaSetDevice(numThread);
int sizeDiag = rows / deviceCount;
int offsetIn = numThread*sizeDiag*(incInput+1);
int offsetOut = numThread*sizeDiag*(incOutput+1);
if(numThread == deviceCount -1 ) sizeDiag += rows % deviceCount;
double *dev_input,*dev_output;
cudaMalloc((void**)&dev_output,sizeDiag*sizeof(double));
cudaMalloc((void**)&dev_input,sizeDiag*sizeof(double));
if(cublasSetVector(sizeDiag,sizeof(double),&inputMatrix[offsetIn],incInput+1,dev_input,1)!=CUBLAS_STATUS_SUCCESS)
errorStatus[numThread] = ERROR;
int grid = ceil((sizeDiag)/1024);
if(grid<=0)grid=1;
int BLOCK_SIZE=512;
int number_of_blocks = ((sizeDiag) + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 gridDim(number_of_blocks, 1);
dim3 blockDim(BLOCK_SIZE, 1);
setValue<<<gridDim, blockDim>>>(dev_output,1,sizeDiag);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cublasDaxpy(cublasHandler[numThread],sizeDiag,&alpha,dev_output,1,dev_input,1);
if(cublasGetVector(sizeDiag,sizeof(double),dev_input,1,&outputMatrix[offsetOut],incOutput+1)!=CUBLAS_STATUS_SUCCESS)
errorStatus[numThread] = ERROR;
cudaFree(dev_input);
cudaFree(dev_output);
}
deviceCount = tmpDeviceCount;
int index;
for(index=0;index<deviceCount;index++){
if(errorStatus[index] != OK ) return ERROR;
}
return OK;
}
int updateMatrix(double *outMatrix,int incOut,double *inMatrix,int incIn,double alpha,int rows,int cols){
/* This function implements the update of a matrix
B = a*A;
Core of this function is the cublasDscal that does this exact operation.
The blocking of the input matrix goes like this
Every thread (device) gets a number of rows that is responsible for the outcome
eg
| a11 a12 a13 .... a1N | <--- Gpu 1
| a21 a22 a23 .... a2N |
| a31 a32 a33 .... a3N | <--- Gpu 2
| a41 a42 a43 .... a4N |
| . . . .... . | etc
| aM1 aM2 aM3 .... aMN |
*/
int tmpDeviceCount = deviceCount;
if(rows < deviceCount) deviceCount =1;
int *errorStatus = (int*)malloc(deviceCount*sizeof(int));
for (int e = 0; e < deviceCount; e++) errorStatus[e] = OK; // initialize, otherwise the final check reads uninitialized memory
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
int numThread = omp_get_thread_num();
cudaSetDevice(numThread);
int sizeRows = rows / deviceCount;
int offsetIn = numThread*sizeRows*incIn;
int offsetOut = numThread*sizeRows*incOut;
if(numThread == deviceCount-1) sizeRows += rows%deviceCount;
double *dev_inMatrix;
cudaMalloc((void**)&dev_inMatrix,sizeRows*cols*sizeof(double));
cublasSetMatrix(cols,sizeRows,sizeof(double),&inMatrix[offsetIn],incIn,dev_inMatrix,cols);
cublasDscal(cublasHandler[numThread],sizeRows*cols,&alpha,dev_inMatrix,1);
cublasGetMatrix(cols,sizeRows,sizeof(double),dev_inMatrix,cols,(void*)&outMatrix[offsetOut],incOut);
cudaFree(dev_inMatrix);
}
deviceCount = tmpDeviceCount;
int index;
for(index=0;index<deviceCount;index++){
if(errorStatus[index] != OK ) return ERROR;
}
return OK;
}
int divisor(int number){
/* Just a nothing-to-say-about-function passing by */
int i;
for (i = number / 2; i >= 1; i--)
{
if (number % i == 0)
{
break;
}
}
return i;
}
void matrixMul(double* C,int ldc,double* A,int lda,double* B,int ldb,int m,int k,int n){
/* input A,B
output C
*/
/* This function implements the block matrix multiplication
Given a number, divisor() returns a proper divisor b of it, so that b*c equals that number.
So, from deviceCount we get b and c, and those two numbers define how many blocks of rows
and how many blocks of columns the output matrix is split into.
After that every thread (device) takes one output block and the input blocks it needs and
eventually calls the GPU_strassen function to do that block's multiplication.
*/
int numA,numB;
int stepA,stepB;
numA = divisor(deviceCount);
if(numA==0)
numA=1;
numB = deviceCount/numA;
stepA = m/numA; // Number of rows per block
stepB = n/numB; // Number of columns per block
int i,j; // for counters
int tmpDeviceCount=deviceCount;
if(deviceCount > m || deviceCount > n) deviceCount = 1;
omp_set_num_threads(deviceCount);
#pragma omp parallel for collapse(2)
for(i=0;i<numA;i++){
for(j=0;j<numB;j++){
unsigned int numThread = omp_get_thread_num();
if(cudaSetDevice(numThread)!=cudaSuccess){
printf("ERROR");
//exit;
}
if(i==numA-1 && j==numB-1){
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA+m % numA,k,stepA+m % numA,k,stepB+n % numB,stepB+n % numB,1);
}else if(i==numA-1 && j!=numB-1){
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA+m%numA,k,m%numA+stepA,k,stepB,stepB,1);
}else if(i!=numA-1 && j==numB-1){
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA,k,stepA,k,stepB+n%numB,stepB+n%numB,1);
} else {
GPU_strassen(cublasHandler[numThread],&A[i*stepA*lda],&B[j*stepB],&C[i*stepA*ldc+j*stepB],lda,ldb,ldc,stepA,k,stepA,k,stepB,stepB,1);
}
}
}
deviceCount = tmpDeviceCount;
}
void initializeZero(double *X,int incX,int rows,int cols){
/* This function given an input matrix X, returns matrix X with all values set to 0.
Core of this function is the function cudaMemset()
*/
int tmpDeviceCount = deviceCount;
if(rows<deviceCount) deviceCount = 1;
//deviceCount=1;
omp_set_num_threads(deviceCount);
#pragma omp parallel
{
int numThread = omp_get_thread_num();
cudaSetDevice(numThread);
int sizeRows = rows / deviceCount;
int offsetX = numThread*sizeRows*incX;
if(numThread == deviceCount-1) sizeRows += rows % deviceCount;
double *dev_X;
cudaMalloc((void**)&dev_X,sizeRows*cols*sizeof(double));
cudaMemset(dev_X,0,sizeRows*cols*sizeof(double));
cublasGetMatrix(cols,sizeRows,sizeof(double),dev_X,cols,&X[offsetX],incX);
cudaFree(dev_X);
}
deviceCount = tmpDeviceCount;
}
void matrixPol(double *B,int incB,double *A,int incA,int rows,int cols,double* coef,int coefNum){
/**** B = f(A,coef); ****/
/* This function is responsible for the computational flow of the polynomial
Inside this function 2 additional matrices are used
Step1: Calculate A^2
Step2: Calculate the first one or two orders of the polynomial, so that the loop below is always left with pairs of orders
Step3: Compute the rest of the polynomial inside the loop
*/
double *tmpMatrix = (double*)malloc(rows*cols*sizeof(double));
double *A_2 = (double*)malloc(rows*cols*sizeof(double));
matrixMul(A_2,cols,A,incA,A,incA,rows,cols,cols); /* A_2 = A*A */
int loopStart;
if( (coefNum % 2) == 0 ) {
/* if polynomial order is even compute the aI + bX */
updateMatrix(B,incB,A,incA,coef[coefNum-1],rows,cols);
updateDiag(B,incB,B,incB,coef[coefNum-2],rows,cols);
loopStart=coefNum-3;
}else{
/* if polynomial order is odd compute the aI */
initializeZero(tmpMatrix,cols,rows,cols);
updateDiag(B,incB,tmpMatrix,cols,coef[coefNum-1],rows,cols);
loopStart=coefNum-2;
}
int i;
for(i =loopStart;i>=0;i=i-2){
/*Rest of the polynomial orders are computed here */
matrixMul(B,incB,A_2,cols,B,incB,rows,cols,cols); /*B = X_2*B */
updateMatrix(tmpMatrix,cols,A,incA,coef[i],rows,cols);/* a*X */
updateDiag(tmpMatrix,cols,tmpMatrix,cols,coef[i-1],rows,cols); /* b*I+a*X */
matrixAdd(B,incB,B,incB,tmpMatrix,cols,rows,cols); /* B =B + b*I+a*X */
}
free(tmpMatrix);
free(A_2);
}
|
c7e711c68cc56c094df83aba1f08da3b312c0bfc.hip | // !!! This is a file automatically generated by hipify!!!
// This is a personal academic project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include "coloringMCMC.h"
/*
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__printMemAlloc() {
//https://stackoverflow.com/questions/34356768/managing-properly-an-array-of-results-that-is-larger-than-the-memory-available-a
//colorsChecker_d e orderedColors_d
//size_t total_mem, free_mem;
//hipMemGetInfo(&free_mem, &total_mem);
//std::cout << "total mem: " << total_mem << " free mem:" << free_mem << std::endl;
//int tot = nnodes * sizeof(uint32_t) * 3;
//std::cout << "nnodes * sizeof(uint32_t): " << nnodes * sizeof(uint32_t) << " X 3" << std::endl;
//tot += nnodes * sizeof(float) * 2;
//std::cout << "nnodes * sizeof(float): " << nnodes * sizeof(float) << " X 2" << std::endl;
//tot += nnodes * param.nCol * sizeof(bool);t
//std::cout << "nnodes * param.nCol * sizeof(bool): " << nnodes * param.nCol * sizeof(bool) << " X 1" << std::endl;
//tot += nnodes * param.nCol * sizeof(uint32_t);
//std::cout << "nnodes * param.nCol * sizeof(uint32_t): " << nnodes * param.nCol * sizeof(uint32_t) << " X 1" << std::endl;
//std::cout << "TOTALE: " << tot << " bytes" << std::endl;
}
*/
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun0_start(int iteration) {
LOG(TRACE) << std::endl << "ColoringMCMC GPU";
LOG(TRACE) << "numCol: " << param.nCol;
LOG(TRACE) << "epsilon: " << param.epsilon;
LOG(TRACE) << "lambda: " << param.lambda;
LOG(TRACE) << "ratioFreezed: " << param.ratioFreezed;
LOG(TRACE) << "maxRip: " << param.maxRip << std::endl;
LOG(TRACE) << "numColorRatio: " << param.numColorRatio;
logFile.open(directory + ".log");
colorsFile.open(directory + "-colors.txt");
size_t total_mem, free_mem;
hipMemGetInfo(&free_mem, &total_mem);
logFile << "total memory: " << total_mem << " free memory:" << free_mem << std::endl;
logFile << "numCol: " << param.nCol << std::endl;
logFile << "epsilon: " << param.epsilon << std::endl;
logFile << "lambda: " << param.lambda << std::endl;
logFile << "ratioFreezed: " << param.ratioFreezed << std::endl;
logFile << "maxRip: " << param.maxRip << std::endl << std::endl;
logFile << "numColorRatio: " << param.numColorRatio << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun1_init() {
LOG(TRACE) << "COLORAZIONE INIZIALE";
logFile << "COLORAZIONE INIZIALE" << std::endl;
getStatsNumColors("start_");
LOG(TRACE) << std::endl << "end colorazione iniziale -------------------------------------------------------------------" << std::endl << std::endl;
logFile << std::endl << "end colorazione iniziale -------------------------------------------------------------------" << std::endl << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun2_conflicts(bool isTailCutting) {
LOG(TRACE) << "***** Tentativo numero: " << rip;
if (isTailCutting)
LOG(TRACE) << "---> TailCutting";
LOG(TRACE) << "conflitti rilevati: " << conflictCounter;
logFile << "***** Tentativo numero: " << rip << std::endl;
if (isTailCutting)
logFile << "---> TailCutting" << std::endl;
logFile << "conflitti rilevati: " << conflictCounter << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun3_newConflicts() {
LOG(TRACE) << "nuovi conflitti rilevati: " << conflictCounterStar;
logFile << "nuovi conflitti rilevati: " << conflictCounterStar << std::endl;
getStatsFreeColors();
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun5() {
LOG(TRACE) << "lambda: " << -param.lambda;
LOG(TRACE) << "probs p: " << p << " pStar:" << pStar;
LOG(TRACE) << "left(no lambda): " << conflictCounterStar - conflictCounter << " right:" << p - pStar;
LOG(TRACE) << "result: " << result;
LOG(TRACE) << "random: " << random;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun6_change() {
LOG(TRACE) <<"CHANGE";
logFile << "CHANGE" << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun7_end() {
std::string maxIteration = rip < param.maxRip ? "no" : "yes";
LOG(TRACE) << "COLORAZIONE FINALE";
LOG(TRACE) << "Time " << duration;
LOG(TRACE) << "Max iteration reached " << maxIteration;
logFile << "COLORAZIONE FINALE" << std::endl;
logFile << "Time " << duration << std::endl;
logFile << "Max iteration reached " << maxIteration << std::endl;
getStatsNumColors("end_");
LOG(TRACE) << std::endl << "end colorazione finale -------------------------------------------------------------------";
logFile << std::endl << "end colorazione finale -------------------------------------------------------------------" << std::endl << std::endl;
logFile.close();
colorsFile.close();
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::getStatsFreeColors() {
uint32_t statsFreeColors_max, statsFreeColors_min, statsFreeColors_avg;
cuSts = hipMemcpy(statsColors_h, statsFreeColors_d, nnodes * sizeof(uint32_t), hipMemcpyDeviceToHost); cudaCheck(cuSts, __FILE__, __LINE__);
statsFreeColors_max = statsFreeColors_avg = 0;
statsFreeColors_min = param.nCol + 1;
for (uint32_t i = 0; i < nnodes; i++) {
uint32_t freeColors = statsColors_h[i];
statsFreeColors_avg += freeColors;
statsFreeColors_max = (freeColors > statsFreeColors_max) ? freeColors : statsFreeColors_max;
statsFreeColors_min = (freeColors < statsFreeColors_min) ? freeColors : statsFreeColors_min;
}
statsFreeColors_avg /= (float)nnodes;
LOG(TRACE) << "Max Free Colors: " << statsFreeColors_max << " - Min Free Colors: " << statsFreeColors_min << " - AVG Free Colors: " << statsFreeColors_avg;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::getStatsNumColors(std::string prefix) {
cuSts = hipMemcpy(coloring_h, coloring_d, nnodes * sizeof(uint32_t), hipMemcpyDeviceToHost); cudaCheck(cuSts, __FILE__, __LINE__);
memset(statsColors_h, 0, nnodes * sizeof(uint32_t));
for (int i = 0; i < nnodes; i++)
statsColors_h[coloring_h[i]]++;
int counter = 0;
int max_i = 0, min_i = nnodes;
int max_c = 0, min_c = nnodes;
int numberOfCol = param.nCol;
float average = 0, variance = 0, standardDeviation, balancingIndex = 0;
average = (float)nnodes / numberOfCol;
for (int i = 0; i < numberOfCol; i++)
{
if (statsColors_h[i] > 0) {
counter++;
if (statsColors_h[i] > max_c) {
max_i = i;
max_c = statsColors_h[i];
}
if (statsColors_h[i] < min_c) {
min_i = i;
min_c = statsColors_h[i];
}
balancingIndex += pow(statsColors_h[i] - average, 2.f);
}
}
balancingIndex /= (nnodes * prob);
balancingIndex = sqrtf(balancingIndex);
for (int i = 0; i < numberOfCol; i++) {
variance += pow((statsColors_h[i] - average), 2.f);
}
variance /= numberOfCol;
standardDeviation = sqrt(variance);
#ifdef PRINTHISTOGRAM
int divider = (max_c / (param.nCol / 3) > 0) ? max_c / (param.nCol / 3) : 1;
for (int i = 0; i < numberOfCol; i++)
{
std::cout << "Color " << i << " ";
std::string linea;
for (int j = 0; j < statsColors_h[i] / divider; j++)
linea += "*";
LOG(TRACE) << linea;
}
LOG(TRACE) <<"Every * is " << divider << " nodes";
#endif // PRINTHISTOGRAM
LOG(TRACE) << "Number of used colors is " << counter << " on " << numberOfCol << " available";
LOG(TRACE) << "Most used colors is " << max_i << " used " << max_c << " times";
LOG(TRACE) << "Least used colors is " << min_i << " used " << min_c << " times";
LOG(TRACE) << "Average " << average;
LOG(TRACE) << "Variance " << variance;
LOG(TRACE) << "StandardDeviation " << standardDeviation;
LOG(TRACE) << "BalancingIndex " << balancingIndex;
//LOG(TRACE) << "Colors average " << cAverage << std::endl;
//LOG(TRACE) << "Colors variance " << cVariance << std::endl;
//LOG(TRACE) << "Colors standardDeviation " << cStandardDeviation << std::endl;
if (prefix == "end_") {
for (int i = 0; i < nnodes; i++)
colorsFile << i << " " << coloring_h[i] << std::endl;
}
#ifdef PRINTHISTOGRAM
for (int i = 0; i < numberOfCol; i++)
{
logFile << "Color " << i << " ";
for (int j = 0; j < statsColors_h[i] / divider; j++)
logFile << "*";
logFile << std::endl;
}
logFile << "Every * is " << divider << " nodes" << std::endl;
logFile << std::endl;
#endif //PRINTHISTOGRAM
logFile << "Number of used colors is " << counter << " on " << numberOfCol << " available" << std::endl;
logFile << "Most used colors is " << max_i << " used " << max_c << " times" << std::endl;
logFile << "Least used colors is " << min_i << " used " << min_c << " times" << std::endl;
logFile << std::endl;
logFile << "Average " << average << std::endl;
logFile << "Variance " << variance << std::endl;
logFile << "StandardDeviation " << standardDeviation << std::endl;
logFile << "BalancingIndex " << balancingIndex << std::endl;
//logFile << std::endl;
//logFile << "Colors average " << cAverage << std::endl;
//logFile << "Colors variance " << cVariance << std::endl;
//logFile << "Colors standardDeviation " << cStandardDeviation << std::endl;
logFile << std::endl;
}
//// This is needed to keep the declarations and the definitions of the class in separate files
//// Every new declaration has to be added here for every new typed class used in main
template class ColoringMCMC<col, col>;
template class ColoringMCMC<float, float>;
| c7e711c68cc56c094df83aba1f08da3b312c0bfc.cu | // This is a personal academic project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include "coloringMCMC.h"
/*
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__printMemAlloc() {
//https://stackoverflow.com/questions/34356768/managing-properly-an-array-of-results-that-is-larger-than-the-memory-available-a
//colorsChecker_d e orderedColors_d
//size_t total_mem, free_mem;
//cudaMemGetInfo(&free_mem, &total_mem);
//std::cout << "total mem: " << total_mem << " free mem:" << free_mem << std::endl;
//int tot = nnodes * sizeof(uint32_t) * 3;
//std::cout << "nnodes * sizeof(uint32_t): " << nnodes * sizeof(uint32_t) << " X 3" << std::endl;
//tot += nnodes * sizeof(float) * 2;
//std::cout << "nnodes * sizeof(float): " << nnodes * sizeof(float) << " X 2" << std::endl;
//tot += nnodes * param.nCol * sizeof(bool);t
//std::cout << "nnodes * param.nCol * sizeof(bool): " << nnodes * param.nCol * sizeof(bool) << " X 1" << std::endl;
//tot += nnodes * param.nCol * sizeof(uint32_t);
//std::cout << "nnodes * param.nCol * sizeof(uint32_t): " << nnodes * param.nCol * sizeof(uint32_t) << " X 1" << std::endl;
//std::cout << "TOTALE: " << tot << " bytes" << std::endl;
}
*/
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun0_start(int iteration) {
LOG(TRACE) << std::endl << "ColoringMCMC GPU";
LOG(TRACE) << "numCol: " << param.nCol;
LOG(TRACE) << "epsilon: " << param.epsilon;
LOG(TRACE) << "lambda: " << param.lambda;
LOG(TRACE) << "ratioFreezed: " << param.ratioFreezed;
LOG(TRACE) << "maxRip: " << param.maxRip << std::endl;
LOG(TRACE) << "numColorRatio: " << param.numColorRatio;
logFile.open(directory + ".log");
colorsFile.open(directory + "-colors.txt");
size_t total_mem, free_mem;
cudaMemGetInfo(&free_mem, &total_mem);
logFile << "total memory: " << total_mem << " free memory:" << free_mem << std::endl;
logFile << "numCol: " << param.nCol << std::endl;
logFile << "epsilon: " << param.epsilon << std::endl;
logFile << "lambda: " << param.lambda << std::endl;
logFile << "ratioFreezed: " << param.ratioFreezed << std::endl;
logFile << "maxRip: " << param.maxRip << std::endl << std::endl;
logFile << "numColorRatio: " << param.numColorRatio << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun1_init() {
LOG(TRACE) << "COLORAZIONE INIZIALE";
logFile << "COLORAZIONE INIZIALE" << std::endl;
getStatsNumColors("start_");
LOG(TRACE) << std::endl << "end colorazione iniziale -------------------------------------------------------------------" << std::endl << std::endl;
logFile << std::endl << "end colorazione iniziale -------------------------------------------------------------------" << std::endl << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun2_conflicts(bool isTailCutting) {
LOG(TRACE) << "***** Tentativo numero: " << rip;
if (isTailCutting)
LOG(TRACE) << "---> TailCutting";
LOG(TRACE) << "conflitti rilevati: " << conflictCounter;
logFile << "***** Tentativo numero: " << rip << std::endl;
if (isTailCutting)
logFile << "---> TailCutting" << std::endl;
logFile << "conflitti rilevati: " << conflictCounter << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun3_newConflicts() {
LOG(TRACE) << "nuovi conflitti rilevati: " << conflictCounterStar;
logFile << "nuovi conflitti rilevati: " << conflictCounterStar << std::endl;
getStatsFreeColors();
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun5() {
LOG(TRACE) << "lambda: " << -param.lambda;
LOG(TRACE) << "probs p: " << p << " pStar:" << pStar;
LOG(TRACE) << "left(no lambda): " << conflictCounterStar - conflictCounter << " right:" << p - pStar;
LOG(TRACE) << "result: " << result;
LOG(TRACE) << "random: " << random;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun6_change() {
LOG(TRACE) <<"CHANGE";
logFile << "CHANGE" << std::endl;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::__customPrintRun7_end() {
std::string maxIteration = rip < param.maxRip ? "no" : "yes";
LOG(TRACE) << "COLORAZIONE FINALE";
LOG(TRACE) << "Time " << duration;
LOG(TRACE) << "Max iteration reached " << maxIteration;
logFile << "COLORAZIONE FINALE" << std::endl;
logFile << "Time " << duration << std::endl;
logFile << "Max iteration reached " << maxIteration << std::endl;
getStatsNumColors("end_");
LOG(TRACE) << std::endl << "end colorazione finale -------------------------------------------------------------------";
logFile << std::endl << "end colorazione finale -------------------------------------------------------------------" << std::endl << std::endl;
logFile.close();
colorsFile.close();
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::getStatsFreeColors() {
uint32_t statsFreeColors_max, statsFreeColors_min, statsFreeColors_avg;
cuSts = cudaMemcpy(statsColors_h, statsFreeColors_d, nnodes * sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaCheck(cuSts, __FILE__, __LINE__);
statsFreeColors_max = statsFreeColors_avg = 0;
statsFreeColors_min = param.nCol + 1;
for (uint32_t i = 0; i < nnodes; i++) {
uint32_t freeColors = statsColors_h[i];
statsFreeColors_avg += freeColors;
statsFreeColors_max = (freeColors > statsFreeColors_max) ? freeColors : statsFreeColors_max;
statsFreeColors_min = (freeColors < statsFreeColors_min) ? freeColors : statsFreeColors_min;
}
statsFreeColors_avg /= (float)nnodes;
LOG(TRACE) << "Max Free Colors: " << statsFreeColors_max << " - Min Free Colors: " << statsFreeColors_min << " - AVG Free Colors: " << statsFreeColors_avg;
}
template<typename nodeW, typename edgeW>
void ColoringMCMC<nodeW, edgeW>::getStatsNumColors(std::string prefix) {
cuSts = cudaMemcpy(coloring_h, coloring_d, nnodes * sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaCheck(cuSts, __FILE__, __LINE__);
memset(statsColors_h, 0, nnodes * sizeof(uint32_t));
for (int i = 0; i < nnodes; i++)
statsColors_h[coloring_h[i]]++;
int counter = 0;
int max_i = 0, min_i = nnodes;
int max_c = 0, min_c = nnodes;
int numberOfCol = param.nCol;
float average = 0, variance = 0, standardDeviation, balancingIndex = 0;
average = (float)nnodes / numberOfCol;
for (int i = 0; i < numberOfCol; i++)
{
if (statsColors_h[i] > 0) {
counter++;
if (statsColors_h[i] > max_c) {
max_i = i;
max_c = statsColors_h[i];
}
if (statsColors_h[i] < min_c) {
min_i = i;
min_c = statsColors_h[i];
}
balancingIndex += pow(statsColors_h[i] - average, 2.f);
}
}
balancingIndex /= (nnodes * prob);
balancingIndex = sqrtf(balancingIndex);
for (int i = 0; i < numberOfCol; i++) {
variance += pow((statsColors_h[i] - average), 2.f);
}
variance /= numberOfCol;
standardDeviation = sqrt(variance);
#ifdef PRINTHISTOGRAM
int divider = (max_c / (param.nCol / 3) > 0) ? max_c / (param.nCol / 3) : 1;
for (int i = 0; i < numberOfCol; i++)
{
std::cout << "Color " << i << " ";
std::string linea;
for (int j = 0; j < statsColors_h[i] / divider; j++)
linea += "*";
LOG(TRACE) << linea;
}
LOG(TRACE) <<"Every * is " << divider << " nodes";
#endif // PRINTHISTOGRAM
LOG(TRACE) << "Number of used colors is " << counter << " on " << numberOfCol << " available";
LOG(TRACE) << "Most used colors is " << max_i << " used " << max_c << " times";
LOG(TRACE) << "Least used colors is " << min_i << " used " << min_c << " times";
LOG(TRACE) << "Average " << average;
LOG(TRACE) << "Variance " << variance;
LOG(TRACE) << "StandardDeviation " << standardDeviation;
LOG(TRACE) << "BalancingIndex " << balancingIndex;
//LOG(TRACE) << "Colors average " << cAverage << std::endl;
//LOG(TRACE) << "Colors variance " << cVariance << std::endl;
//LOG(TRACE) << "Colors standardDeviation " << cStandardDeviation << std::endl;
if (prefix == "end_") {
for (int i = 0; i < nnodes; i++)
colorsFile << i << " " << coloring_h[i] << std::endl;
}
#ifdef PRINTHISTOGRAM
for (int i = 0; i < numberOfCol; i++)
{
logFile << "Color " << i << " ";
for (int j = 0; j < statsColors_h[i] / divider; j++)
logFile << "*";
logFile << std::endl;
}
logFile << "Every * is " << divider << " nodes" << std::endl;
logFile << std::endl;
#endif //PRINTHISTOGRAM
logFile << "Number of used colors is " << counter << " on " << numberOfCol << " available" << std::endl;
logFile << "Most used colors is " << max_i << " used " << max_c << " times" << std::endl;
logFile << "Least used colors is " << min_i << " used " << min_c << " times" << std::endl;
logFile << std::endl;
logFile << "Average " << average << std::endl;
logFile << "Variance " << variance << std::endl;
logFile << "StandardDeviation " << standardDeviation << std::endl;
logFile << "BalancingIndex " << balancingIndex << std::endl;
//logFile << std::endl;
//logFile << "Colors average " << cAverage << std::endl;
//logFile << "Colors variance " << cVariance << std::endl;
//logFile << "Colors standardDeviation " << cStandardDeviation << std::endl;
logFile << std::endl;
}
//// This is needed to keep the declarations and the definitions of the class in separate files
//// Every new declaration has to be added here for every new typed class used in main
template class ColoringMCMC<col, col>;
template class ColoringMCMC<float, float>;
|
5183ff9e39477705eefa3363cc7439f60326e75f.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <math.h>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
//#define rayTracer 1
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
struct is_dead{
__host__ __device__ bool operator()(const ray& r)
{
return r.isDead;
}
};
// LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
// Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
// TODO: IMPLEMENT THIS FUNCTION
// Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
ray r;
glm::vec3 a = glm::normalize(glm::cross(view, up));
glm::vec3 b = glm::normalize(glm::cross(view, a));
glm::vec3 H = a * glm::length(view) * glm::tan(glm::radians(fov.x));
glm::vec3 V = b * glm::length(view) * glm::tan(glm::radians(fov.y));
glm::vec3 M = eye + view;
glm::vec3 rayDes = M + (2*((float)x/(resolution.x-1)) - 1)*H + (2*((float)y/(resolution.y-1)) - 1)*V;
//get the ray direction from eye to the destination
glm::vec3 thisRay = rayDes - eye;
r.direction = glm::normalize(thisRay);
r.origin = eye;
r.tempColor = glm::vec3(1.0f);
r.isDead = false;
return r;
}
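// Note: (2*x/(resolution.x-1) - 1) and (2*y/(resolution.y-1) - 1) map the pixel indices to [-1,1],
// so rayDes sweeps the image plane from M-H to M+H horizontally and from M-V to M+V vertically.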
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, float iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0f/iterations;
color.y = image[index].y*255.0f/iterations;
color.z = image[index].z*255.0f/iterations; //weight for each iteration
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
// loop through all geometry to test ray intersection, returns the geoID that corresponds to intersected geometry
__host__ __device__ int intersectTest(ray r, glm::vec3& intersect, glm::vec3& normal, staticGeom* geoms, int numberOfGeoms, triangle * cudatris){
if(r.isDead)
return -1; //didn't hit anything
float distMin = -2, dist = -1;
glm::vec3 tempIntersect(0.0f);
glm::vec3 tempNormal(0.0f);
int ID = -1;
for (int g=0; g<numberOfGeoms; g++){
if(geoms[g].type == SPHERE){
dist = sphereIntersectionTest(geoms[g], r, tempIntersect, tempNormal);
}
else if(geoms[g].type == CUBE ){
dist = boxIntersectionTest(geoms[g], r, tempIntersect, tempNormal);
}
else if (geoms[g].type == MESH){
dist = polygonIntersectionTest(geoms[g], r, tempIntersect, tempNormal, cudatris);
}
if( (distMin < 0 && dist > -0.5f ) || ( distMin > -1 && dist < distMin && dist > -0.5f ) ){
distMin = dist;
ID = g;
intersect = tempIntersect;
normal = tempNormal;
}
}
return ID;
}
//returns true if the ray directly hits a light source
__host__ __device__ bool LightRayTest(ray r, staticGeom* geoms, int numberOfGeoms, material* materials, triangle * cudatris){
glm::vec3 intersPoint(0.0f);
glm::vec3 intersNormal(0.0f);
//printf("shadow ray: [%f,%f,%f], [%f,%f,%f]\n", sr.origin.x,sr.origin.y,sr.origin.z,sr.direction.x,sr.direction.y,sr.direction.z);
int geoID = intersectTest(r, intersPoint, intersNormal, geoms, numberOfGeoms, cudatris);
if( geoID>-1 && materials[geoms[geoID].materialid].emittance > 0){ //hit light soource
return true;
}
else{
return false;
}
}
//calculates the direct lighting for a certain hit point and modify color of that hit
__device__ __host__ void directLighting(float seed, glm::vec3& theColor, glm::vec3& theIntersect, glm::vec3& theNormal, int geoID, int* lights, int numOfLights, material* cudamats, staticGeom* geoms, int numOfGeoms, triangle * cudatris){
ray shadowRay;
float rayLen;
float lightArea;
glm::vec3 lightNormal;
int chosenLight = lights[0];
if( numOfLights > 1){
thrust::default_random_engine rng(hash(seed));
thrust::uniform_real_distribution<float> u01(0,1);
chosenLight = lights[(int)((float)u01(rng) * numOfLights)];
}
glm::vec3 Plight;
if( geoms[chosenLight].type == CUBE ){
Plight = getRandomPointOnCube( geoms[chosenLight], seed);
}
else if( geoms[chosenLight].type == SPHERE ){
Plight = getRandomPointOnSphere( geoms[chosenLight], seed);
}
shadowRay.direction = glm::normalize(Plight - theIntersect);
shadowRay.origin = theIntersect + (float)EPSILON * shadowRay.direction;
rayLen = glm::length(Plight - theIntersect); // distance to the sampled point on the light
int lightID = geoms[chosenLight].materialid; // material of the chosen light, read below for its emittance
material curMat = cudamats[geoms[geoID].materialid]; //material of the hit goemetry
if(LightRayTest(shadowRay, geoms, numOfGeoms, cudamats, cudatris)){
float cosTerm = glm::clamp( glm::dot( theNormal, shadowRay.direction ), 0.0f, 1.0f); //proportion of facing light
float cosTerm2 = glm::clamp( glm::dot( lightNormal, -shadowRay.direction ), 0.0f, 1.0f); //proportion of incoming light
float areaSampling = lightArea / (float) pow( rayLen, 2.0f) ; // dA/r^2
theColor += cudamats[lightID].emittance * curMat.color * cosTerm * cosTerm2 * areaSampling ;
}
}
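// The accumulated term above is the standard area-sampled direct-lighting estimator:
// emitted radiance * surface albedo * cos(theta_surface) * cos(theta_light) * dA / r^2.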
#ifdef rayTracer
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel (recursive)
__host__ __device__ glm::vec3 raytraceRecursive(ray r, int iteration, float currentIndexOfRefraction, int depth, int maximumDepth, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, light* lightSources, int numberOfLights){
glm::vec3 bgColor(0.0f);
glm::vec3 ambientColor(1.0f);
glm::vec3 phongColor(0.0f), reflColor(0.0f), refraColor(0.0f);
glm::vec3 returnColor(0.0f);
float ka = 0.2f;
if(depth > maximumDepth)
return bgColor;
// intersection test
glm::vec3 intersectionPoint, intersectionNormal;
int intersIndex = rayIntersect(r, geoms, numberOfGeoms, intersectionPoint, intersectionNormal, materials);
if(intersIndex == -1) return bgColor; //no intersection: return background color
material mat = materials[geoms[intersIndex].materialid];
if(mat.emittance > 0.0f){ // intersected with light source geometry
returnColor = mat.color;
}
else{ // intersected with actual geometry
// returnColor = ka * ambientColor * materials[geoms[intersIndex].materialid].color;
if(/*iteration == 0 && */materials[geoms[intersIndex].materialid].hasRefractive == 1)
{
float nextIndexOfRefraction = 1.0f;
glm::vec3 refraDir;
if(abs(currentIndexOfRefraction - 1) < 0.00001f) // current ray is in air
{
refraDir = calculateRefractionDirection(r.direction, intersectionNormal, currentIndexOfRefraction, materials[geoms[intersIndex].materialid].indexOfRefraction, nextIndexOfRefraction);
}
else // current ray is in glass
{
refraDir = calculateRefractionDirection(r.direction, -intersectionNormal, currentIndexOfRefraction, 1.0f, nextIndexOfRefraction);
}
ray refraRay;
refraRay.origin = intersectionPoint + 0.01f * refraDir;
refraRay.direction = refraDir;
refraColor = raytraceRecursive(refraRay, iteration, nextIndexOfRefraction, depth + 1, maximumDepth, geoms, numberOfGeoms, materials, numberOfMaterials, lightSources, numberOfLights);
returnColor += refraColor;
}
if(materials[geoms[intersIndex].materialid].hasReflective == 1)
{
glm::vec3 reflDir = calculateReflectionDirection(intersectionNormal, r.direction);
ray reflRay;
reflRay.origin = intersectionPoint + 0.01f * reflDir;
reflRay.direction = reflDir;
reflColor = raytraceRecursive(reflRay, iteration, 1.0f, depth + 1, maximumDepth, geoms, numberOfGeoms, materials, numberOfMaterials, lightSources, numberOfLights);
returnColor += reflColor;
}
if(iteration < numberOfLights){
if(ShadowRayUnblocked(intersectionPoint, lightSources[iteration].position, geoms, numberOfGeoms, materials))
{
glm::vec3 L = glm::normalize(lightSources[iteration].position - intersectionPoint);
float dot1 = glm::clamp(glm::dot(intersectionNormal, L), 0.0f, 1.0f);
float dot2 = glm::dot(calculateReflectionDirection(intersectionNormal, -L) ,-r.direction);
glm::vec3 diffuse = lightSources[iteration].color * 0.5f * materials[geoms[intersIndex].materialid].color * dot1;
glm::vec3 specular;
if(abs(materials[geoms[intersIndex].materialid].specularExponent) > 1e-6)
specular = lightSources[iteration].color * 0.1f * pow(glm::max(dot2, 0.0f), materials[geoms[intersIndex].materialid].specularExponent);
phongColor += diffuse + specular;
}
}
returnColor += (5.0f / numberOfLights) * (0.1f * (float)numberOfLights * reflColor + (float)numberOfLights * refraColor);
}
return returnColor;
}
__global__ void raytracePrimary(glm::vec2 resolution, int time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, int* lightSources, int numberOfLights){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
//on every thread, get color for any pixel given pixel(x,y) and camera
if((x<=resolution.x && y<=resolution.y)){
int init_depth = 0;
ray r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
colors[index] += raytraceRecursive(r, time, 1.0f, init_depth, rayDepth, geoms, numberOfGeoms, materials, numberOfMaterials, lightSources, numberOfLights);
}
}
#endif
// TODO: IMPLEMENT THIS FUNCTION
// Core path tracer kernel
__global__ void pathtraceRay(ray* rays, float time, int rayDepth, int numOfRays, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int* lights, int numOfLights, cameraData cam, triangle* cudatris){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (int)ceil(sqrt((float)numOfRays))* y;
if( index < numOfRays ){
float seed = (float)index * (float)time * ( (float)rayDepth + 1.0f );
ray r = rays[index];
glm::vec3 Pintersect(0.0f);
glm::vec3 Pnormal(0.0f);
int intersIndex = intersectTest(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris);
if(intersIndex!=-1){
material curMat = cudamats[geoms[intersIndex].materialid];
if( curMat.emittance > 0 ){ //ray ends when hit light source
colors[r.pixelIndex] += r.tempColor * curMat.color * curMat.emittance;
r.isDead = true;
}
else{ // for reflection and refraction effect
if(curMat.hasReflective > 0 || curMat.hasRefractive > 0){
Fresnel Fres;
float reflectance;
glm::vec3 reflectDir, transmitDir;
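// dot(ray, normal) < 0 means the ray arrives from outside the surface; when inside, flip the normal and swap the indices of refraction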
if(glm::dot(r.direction,Pnormal)<0){ //ray is outside
Fres = calculateFresnel(Pnormal,r.direction,1.0f, curMat.indexOfRefraction);
reflectDir = calculateReflectionDirection(Pnormal, r.direction);
transmitDir = calculateTransmissionDirection(Pnormal, r.direction, 1.0f, curMat.indexOfRefraction);
}
else{ //ray is inside
Fres = calculateFresnel(-Pnormal,r.direction, curMat.indexOfRefraction, 1.0f);
reflectDir = calculateReflectionDirection(-Pnormal, r.direction);
transmitDir = calculateTransmissionDirection(-Pnormal, r.direction, curMat.indexOfRefraction, 1.0f);
}
if( curMat.hasRefractive > 0 && curMat.hasReflective > 0){
thrust::default_random_engine rng( hash( seed ) );
thrust::uniform_real_distribution<float> u01(0,1);
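// randomly pick reflection or transmission, weighted by the Fresnel reflection coefficient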
if((float) u01(rng) < Fres.reflectionCoefficient ){ //reflected
r.direction = reflectDir;
}
else{ //transmitted
r.direction = transmitDir;
}
}
else if(curMat.hasReflective > 0){
r.direction = reflectDir;
}
else if (curMat.hasRefractive > 0){
r.direction = transmitDir;
}
r.origin = Pintersect + (float)EPSILON * r.direction;
if(glm::length(curMat.color)>0)
r.tempColor *= curMat.color ;
}
else{
thrust::default_random_engine rng(hash(seed));
thrust::uniform_real_distribution<float> u01(0,1);
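// with small probability (1%) estimate direct lighting here, otherwise continue the path with a cosine-weighted diffuse bounce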
if((float) u01(rng) < 0.01f ){ //direct light
directLighting(seed,r.tempColor,Pintersect,Pnormal,intersIndex,lights,numOfLights, cudamats,geoms, numberOfGeoms, cudatris);
}
else{
//cos weighted
r.direction = calculateCosWeightedRandomDirInHemisphere(Pnormal, (float) u01(rng), (float) u01(rng));
r.origin = Pintersect + (float)EPSILON * r.direction ;
float diffuseTerm = glm::clamp( glm::dot( Pnormal,r.direction ), 0.0f, 1.0f);
r.tempColor *= diffuseTerm * curMat.color;
}
}
}
}
else{ //if ray hit nothing
r.isDead = true;
}
rays[index] = r;
}
}
//initialize the ray pool for cudarays
__global__ void generateRaypool(ray * rayPool, cameraData cam, float iterations,glm::vec3 *colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int * lightIDs, int numberOfLights, triangle * cudatris){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * cam.resolution.x);
//ray r = rayPool[index];
if( x<= cam.resolution.x && y <= cam.resolution.y ){
ray r = raycastFromCameraKernel( cam.resolution, iterations, x, y, cam.position, cam.view, cam.up, cam.fov );
r.pixelIndex = index;
if(DEPTH_OF_FIELD){
glm::vec3 focalPoint = r.origin + r.direction * cam.focalLength / glm::dot(cam.view, r.direction); //L = f/cos(theta)
thrust::default_random_engine rng(hash((float)index*iterations));
thrust::uniform_real_distribution<float> u01(0,1);
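// jitter the eye position over a disk-shaped aperture; re-aiming at the focal point keeps the focal plane sharp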
float theta = 2.0f * PI * u01(rng);
float radius = u01(rng) * cam.aperture;
glm::vec3 eyeOffset(cos(theta)*radius, sin(theta)*radius, 0);
glm::vec3 newEyePoint = cam.position + eyeOffset;
r.origin = newEyePoint;
r.direction = glm::normalize(focalPoint - newEyePoint);
}
glm::vec3 Pintersect(0.0f);
glm::vec3 Pnormal(0.0f);
int geoID = intersectTest(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris);
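// if the primary ray hits something, accumulate its direct lighting contribution straight into the image buffer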
if( geoID > -1){
directLighting((float)index*iterations, colors[index], Pintersect, Pnormal,geoID, lightIDs, numberOfLights, cudamats, geoms, numberOfGeoms, cudatris);
}
rayPool[index] = r;
}
}
// TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaPathTraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){
int traceDepth = 10; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
// send image to GPU
glm::vec3* cudaimage = NULL;
hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
// package geometry and materials and send to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
int meshID = -1;
triangle* cudatris = NULL;
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
if(geoms[i].type == MESH){
meshID = i; // only one OBJ mesh is supported at a time (per-geometry triangle lists are not handled yet)
newStaticGeom.boundingBoxMax = geoms[i].boundingBoxMax; //bBox is in local coordinates, doesn't change over frames.
newStaticGeom.boundingBoxMin = geoms[i].boundingBoxMin;
newStaticGeom.numOfTris = geoms[i].numOfTris;
hipMalloc((void**)&cudatris, geoms[meshID].numOfTris*sizeof(triangle));
hipMemcpy( cudatris, geoms[meshID].tris, geoms[meshID].numOfTris *sizeof(triangle), hipMemcpyHostToDevice);
}
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
// package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
cam.aperture = renderCam->aperture;
cam.focalLength = renderCam->focalLength;
// material setup
material* cudamats = NULL;
hipMalloc((void**)&cudamats, numberOfMaterials*sizeof(material));
hipMemcpy( cudamats, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);
//lights setup
int numberOfLights = 0;
for(int i = 0; i < numberOfGeoms; ++i){
if(materials[geoms[i].materialid].emittance > 0){
numberOfLights ++ ;
}
}
int *lightIDs = new int[numberOfLights];
int k = 0;
for(int i = 0; i < numberOfGeoms; ++i){
if(materials[geoms[i].materialid].emittance > 0){
lightIDs[k] = i;
k++;
}
}
int* cudalightIDs = NULL;
hipMalloc((void**)&cudalightIDs, numberOfLights*sizeof(int));
hipMemcpy( cudalightIDs, lightIDs, numberOfLights*sizeof(int), hipMemcpyHostToDevice);
//set up ray pool on device
ray* cudarays = NULL;
int numOfRays = cam.resolution.x * cam.resolution.y;
hipMalloc((void**)&cudarays, numOfRays*sizeof(ray));
hipLaunchKernelGGL(( generateRaypool), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudarays, cam, (float)iterations, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cudatris);
for(int cur_depth=0; cur_depth<traceDepth && numOfRays>0; cur_depth++){
thrust::device_ptr<ray> raypoolStart = thrust::device_pointer_cast(cudarays); //converts raw CUDA pointer to a thrust pointer
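// stream compaction: remove terminated rays so later bounces only launch threads for rays that are still alive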
thrust::device_ptr<ray> raypoolEnd = thrust::remove_if(raypoolStart, raypoolStart + numOfRays, is_dead());
numOfRays = (int)(raypoolEnd-raypoolStart);
//xBlocks * yBlocks = numOfRays / (tileSize*tileSize)
int xBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) );
int yBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) );
dim3 newBlocksPerGrid(xBlocks,yBlocks);
hipLaunchKernelGGL(( pathtraceRay), dim3(newBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudarays, (float)iterations, cur_depth, (int)numOfRays, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cam, cudatris);
}
//raytraceRay<<<newBlocksPerGrid, threadsPerBlock>>>(cudarays, (float)iterations, cur_depth, (int)numOfRays, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cam, cudatris);
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage,(float)iterations);
// retrieve image from GPU
hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
// free up stuff, or else we'll leak memory like a madman
hipFree( cudaimage );
hipFree( cudageoms );
hipFree( cudamats );
hipFree( cudarays );
hipFree( cudalightIDs );
if(meshID>-1){
hipFree( cudatris );
}
delete[] geomList;
delete[] lightIDs;
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
| 5183ff9e39477705eefa3363cc7439f60326e75f.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <math.h>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
//#define rayTracer 1
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
struct is_dead{
__host__ __device__ bool operator()(const ray& r)
{
return r.isDead;
}
};
// LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
// Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
// TODO: IMPLEMENT THIS FUNCTION
// Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
ray r;
glm::vec3 a = glm::normalize(glm::cross(view, up));
glm::vec3 b = glm::normalize(glm::cross(view, a));
glm::vec3 H = a * glm::length(view) * glm::tan(glm::radians(fov.x));
glm::vec3 V = b * glm::length(view) * glm::tan(glm::radians(fov.y));
glm::vec3 M = eye + view;
glm::vec3 rayDes = M + (2*((float)x/(resolution.x-1)) - 1)*H + (2*((float)y/(resolution.y-1)) - 1)*V;
//get the ray direction from eye to the destination
glm::vec3 thisRay = rayDes - eye;
r.direction = glm::normalize(thisRay);
r.origin = eye;
r.tempColor = glm::vec3(1.0f);
r.isDead = false;
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, float iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0f/iterations;
color.y = image[index].y*255.0f/iterations;
color.z = image[index].z*255.0f/iterations; //weight for each iteration
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
// loop through all geometry to test ray intersection, returns the geoID that corresponds to intersected geometry
__host__ __device__ int intersectTest(ray r, glm::vec3& intersect, glm::vec3& normal, staticGeom* geoms, int numberOfGeoms, triangle * cudatris){
if(r.isDead)
return -1; //didn't hit anything
float distMin = -2, dist = -1;
glm::vec3 tempIntersect(0.0f);
glm::vec3 tempNormal(0.0f);
int ID = -1;
for (int g=0; g<numberOfGeoms; g++){
if(geoms[g].type == SPHERE){
dist = sphereIntersectionTest(geoms[g], r, tempIntersect, tempNormal);
}
else if(geoms[g].type == CUBE ){
dist = boxIntersectionTest(geoms[g], r, tempIntersect, tempNormal);
}
else if (geoms[g].type == MESH){
dist = polygonIntersectionTest(geoms[g], r, tempIntersect, tempNormal, cudatris);
}
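// keep the closest hit: intersection tests return -1 on a miss, and distMin == -2 means no hit has been recorded yet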
if( (distMin < 0 && dist > -0.5f ) || ( distMin > -1 && dist < distMin && dist > -0.5f ) ){
distMin = dist;
ID = g;
intersect = tempIntersect;
normal = tempNormal;
}
}
return ID;
}
//return true if ray directly hit lights
__host__ __device__ bool LightRayTest(ray r, staticGeom* geoms, int numberOfGeoms, material* materials, triangle * cudatris){
glm::vec3 intersPoint(0.0f);
glm::vec3 intersNormal(0.0f);
//printf("shadow ray: [%f,%f,%f], [%f,%f,%f]\n", sr.origin.x,sr.origin.y,sr.origin.z,sr.direction.x,sr.direction.y,sr.direction.z);
int geoID = intersectTest(r, intersPoint, intersNormal, geoms, numberOfGeoms, cudatris);
if( geoID>-1 && materials[geoms[geoID].materialid].emittance > 0){ //hit light source
return true;
}
else{
return false;
}
}
//calculates the direct lighting for a certain hit point and modify color of that hit
__device__ __host__ void directLighting(float seed, glm::vec3& theColor, glm::vec3& theIntersect, glm::vec3& theNormal, int geoID, int* lights, int numOfLights, material* cudamats, staticGeom* geoms, int numOfGeoms, triangle * cudatris){
ray shadowRay;
float rayLen;
float lightArea;
glm::vec3 lightNormal;
int chosenLight = lights[0];
if( numOfLights > 1){
thrust::default_random_engine rng(hash(seed));
thrust::uniform_real_distribution<float> u01(0,1);
chosenLight = lights[(int)((float)u01(rng) * numOfLights)];
}
glm::vec3 Plight;
if( geoms[chosenLight].type == CUBE ){
Plight = getRandomPointOnCube( geoms[chosenLight], seed);
}
else if( geoms[chosenLight].type == SPHERE ){
Plight = getRandomPointOnSphere( geoms[chosenLight], seed);
}
shadowRay.direction = glm::normalize(Plight - theIntersect);
shadowRay.origin = theIntersect + (float)EPSILON * shadowRay.direction;
rayLen = glm::length(Plight - theIntersect);
int lightID = geoms[chosenLight].materialid; //material index of the chosen light
material curMat = cudamats[geoms[geoID].materialid]; //material of the hit geometry
if(LightRayTest(shadowRay, geoms, numOfGeoms, cudamats, cudatris)){
float cosTerm = glm::clamp( glm::dot( theNormal, shadowRay.direction ), 0.0f, 1.0f); //proportion of facing light
float cosTerm2 = glm::clamp( glm::dot( lightNormal, -shadowRay.direction ), 0.0f, 1.0f); //proportion of incoming light
float areaSampling = lightArea / (float) pow( rayLen, 2.0f) ; // dA/r^2
theColor += cudamats[lightID].emittance * curMat.color * cosTerm * cosTerm2 * areaSampling ;
}
}
#ifdef rayTracer
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel (recursive)
__host__ __device__ glm::vec3 raytraceRecursive(ray r, int iteration, float currentIndexOfRefraction, int depth, int maximumDepth, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, light* lightSources, int numberOfLights){
glm::vec3 bgColor(0.0f);
glm::vec3 ambientColor(1.0f);
glm::vec3 phongColor(0.0f), reflColor(0.0f), refraColor(0.0f);
glm::vec3 returnColor(0.0f);
float ka = 0.2f;
if(depth > maximumDepth)
return bgColor;
// intersection test
glm::vec3 intersectionPoint, intersectionNormal;
int intersIndex = rayIntersect(r, geoms, numberOfGeoms, intersectionPoint, intersectionNormal, materials);
if(intersIndex == -1) return bgColor; //no intersection: return background color
material mat = materials[geoms[intersIndex].materialid];
if(mat.emittance > 0.0f){ // intersected with light source geometry
returnColor = mat.color;
}
else{ // intersected with actual geometry
// returnColor = ka * ambientColor * materials[geoms[intersIndex].materialid].color;
if(/*iteration == 0 && */materials[geoms[intersIndex].materialid].hasRefractive == 1)
{
float nextIndexOfRefraction = 1.0f;
glm::vec3 refraDir;
if(abs(currentIndexOfRefraction - 1) < 0.00001f) // current ray is in air
{
refraDir = calculateRefractionDirection(r.direction, intersectionNormal, currentIndexOfRefraction, materials[geoms[intersIndex].materialid].indexOfRefraction, nextIndexOfRefraction);
}
else // current ray is in glass
{
refraDir = calculateRefractionDirection(r.direction, -intersectionNormal, currentIndexOfRefraction, 1.0f, nextIndexOfRefraction);
}
ray refraRay;
refraRay.origin = intersectionPoint + 0.01f * refraDir;
refraRay.direction = refraDir;
refraColor = raytraceRecursive(refraRay, iteration, nextIndexOfRefraction, depth + 1, maximumDepth, geoms, numberOfGeoms, materials, numberOfMaterials, lightSources, numberOfLights);
returnColor += refraColor;
}
if(materials[geoms[intersIndex].materialid].hasReflective == 1)
{
glm::vec3 reflDir = calculateReflectionDirection(intersectionNormal, r.direction);
ray reflRay;
reflRay.origin = intersectionPoint + 0.01f * reflDir;
reflRay.direction = reflDir;
reflColor = raytraceRecursive(reflRay, iteration, 1.0f, depth + 1, maximumDepth, geoms, numberOfGeoms, materials, numberOfMaterials, lightSources, numberOfLights);
returnColor += reflColor;
}
if(iteration < numberOfLights){
if(ShadowRayUnblocked(intersectionPoint, lightSources[iteration].position, geoms, numberOfGeoms, materials))
{
glm::vec3 L = glm::normalize(lightSources[iteration].position - intersectionPoint);
float dot1 = glm::clamp(glm::dot(intersectionNormal, L), 0.0f, 1.0f);
float dot2 = glm::dot(calculateReflectionDirection(intersectionNormal, -L) ,-r.direction);
glm::vec3 diffuse = lightSources[iteration].color * 0.5f * materials[geoms[intersIndex].materialid].color * dot1;
glm::vec3 specular;
if(abs(materials[geoms[intersIndex].materialid].specularExponent) > 1e-6)
specular = lightSources[iteration].color * 0.1f * pow(glm::max(dot2, 0.0f), materials[geoms[intersIndex].materialid].specularExponent);
phongColor += diffuse + specular;
}
}
returnColor += (5.0f / numberOfLights) * (0.1f * (float)numberOfLights * reflColor + (float)numberOfLights * refraColor);
}
return returnColor;
}
__global__ void raytracePrimary(glm::vec2 resolution, int time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, int* lightSources, int numberOfLights){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
//on every thread, get color for any pixel given pixel(x,y) and camera
if((x<=resolution.x && y<=resolution.y)){
int init_depth = 0;
ray r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
colors[index] += raytraceRecursive(r, time, 1.0f, init_depth, rayDepth, geoms, numberOfGeoms, materials, numberOfMaterials, lightSources, numberOfLights);
}
}
#endif
// TODO: IMPLEMENT THIS FUNCTION
// Core path tracer kernel
__global__ void pathtraceRay(ray* rays, float time, int rayDepth, int numOfRays, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int* lights, int numOfLights, cameraData cam, triangle* cudatris){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (int)ceil(sqrt((float)numOfRays))* y;
if( index < numOfRays ){
float seed = (float)index * (float)time * ( (float)rayDepth + 1.0f );
ray r = rays[index];
glm::vec3 Pintersect(0.0f);
glm::vec3 Pnormal(0.0f);
int intersIndex = intersectTest(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris);
if(intersIndex!=-1){
material curMat = cudamats[geoms[intersIndex].materialid];
if( curMat.emittance > 0 ){ //ray ends when hit light source
colors[r.pixelIndex] += r.tempColor * curMat.color * curMat.emittance;
r.isDead = true;
}
else{ // for reflection and refraction effect
if(curMat.hasReflective > 0 || curMat.hasRefractive > 0){
Fresnel Fres;
float reflectance;
glm::vec3 reflectDir, transmitDir;
if(glm::dot(r.direction,Pnormal)<0){ //ray is outside
Fres = calculateFresnel(Pnormal,r.direction,1.0f, curMat.indexOfRefraction);
reflectDir = calculateReflectionDirection(Pnormal, r.direction);
transmitDir = calculateTransmissionDirection(Pnormal, r.direction, 1.0f, curMat.indexOfRefraction);
}
else{ //ray is inside
Fres = calculateFresnel(-Pnormal,r.direction, curMat.indexOfRefraction, 1.0f);
reflectDir = calculateReflectionDirection(-Pnormal, r.direction);
transmitDir = calculateTransmissionDirection(-Pnormal, r.direction, curMat.indexOfRefraction, 1.0f);
}
if( curMat.hasRefractive > 0 && curMat.hasReflective > 0){
thrust::default_random_engine rng( hash( seed ) );
thrust::uniform_real_distribution<float> u01(0,1);
if((float) u01(rng) < Fres.reflectionCoefficient ){ //reflected
r.direction = reflectDir;
}
else{ //transmitted
r.direction = transmitDir;
}
}
else if(curMat.hasReflective > 0){
r.direction = reflectDir;
}
else if (curMat.hasRefractive > 0){
r.direction = transmitDir;
}
r.origin = Pintersect + (float)EPSILON * r.direction;
if(glm::length(curMat.color)>0)
r.tempColor *= curMat.color ;
}
else{
thrust::default_random_engine rng(hash(seed));
thrust::uniform_real_distribution<float> u01(0,1);
if((float) u01(rng) < 0.01f ){ //direct light
directLighting(seed,r.tempColor,Pintersect,Pnormal,intersIndex,lights,numOfLights, cudamats,geoms, numberOfGeoms, cudatris);
}
else{
//cos weighted
r.direction = calculateCosWeightedRandomDirInHemisphere(Pnormal, (float) u01(rng), (float) u01(rng));
r.origin = Pintersect + (float)EPSILON * r.direction ;
float diffuseTerm = glm::clamp( glm::dot( Pnormal,r.direction ), 0.0f, 1.0f);
r.tempColor *= diffuseTerm * curMat.color;
}
}
}
}
else{ //if ray hit nothing
r.isDead = true;
}
rays[index] = r;
}
}
//initialize the ray pool for cudarays
__global__ void generateRaypool(ray * rayPool, cameraData cam, float iterations,glm::vec3 *colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int * lightIDs, int numberOfLights, triangle * cudatris){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * cam.resolution.x);
//ray r = rayPool[index];
if( x<= cam.resolution.x && y <= cam.resolution.y ){
ray r = raycastFromCameraKernel( cam.resolution, iterations, x, y, cam.position, cam.view, cam.up, cam.fov );
r.pixelIndex = index;
if(DEPTH_OF_FIELD){
glm::vec3 focalPoint = r.origin + r.direction * cam.focalLength / glm::dot(cam.view, r.direction); //L = f/cos(theta)
thrust::default_random_engine rng(hash((float)index*iterations));
thrust::uniform_real_distribution<float> u01(0,1);
float theta = 2.0f * PI * u01(rng);
float radius = u01(rng) * cam.aperture;
glm::vec3 eyeOffset(cos(theta)*radius, sin(theta)*radius, 0);
glm::vec3 newEyePoint = cam.position + eyeOffset;
r.origin = newEyePoint;
r.direction = glm::normalize(focalPoint - newEyePoint);
}
glm::vec3 Pintersect(0.0f);
glm::vec3 Pnormal(0.0f);
int geoID = intersectTest(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris);
if( geoID > -1){
directLighting((float)index*iterations, colors[index], Pintersect, Pnormal,geoID, lightIDs, numberOfLights, cudamats, geoms, numberOfGeoms, cudatris);
}
rayPool[index] = r;
}
}
// TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaPathTraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){
int traceDepth = 10; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
// send image to GPU
glm::vec3* cudaimage = NULL;
cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
// package geometry and materials and send to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
int meshID = -1;
triangle* cudatris = NULL;
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
if(geoms[i].type == MESH){
meshID = i; // only one OBJ mesh is supported at a time (per-geometry triangle lists are not handled yet)
newStaticGeom.boundingBoxMax = geoms[i].boundingBoxMax; //bBox is in local coordinates, doesn't change over frames.
newStaticGeom.boundingBoxMin = geoms[i].boundingBoxMin;
newStaticGeom.numOfTris = geoms[i].numOfTris;
cudaMalloc((void**)&cudatris, geoms[meshID].numOfTris*sizeof(triangle));
cudaMemcpy( cudatris, geoms[meshID].tris, geoms[meshID].numOfTris *sizeof(triangle), cudaMemcpyHostToDevice);
}
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
// package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
cam.aperture = renderCam->aperture;
cam.focalLength = renderCam->focalLength;
// material setup
material* cudamats = NULL;
cudaMalloc((void**)&cudamats, numberOfMaterials*sizeof(material));
cudaMemcpy( cudamats, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);
//lights setup
int numberOfLights = 0;
for(int i = 0; i < numberOfGeoms; ++i){
if(materials[geoms[i].materialid].emittance > 0){
numberOfLights ++ ;
}
}
int *lightIDs = new int[numberOfLights];
int k = 0;
for(int i = 0; i < numberOfGeoms; ++i){
if(materials[geoms[i].materialid].emittance > 0){
lightIDs[k] = i;
k++;
}
}
int* cudalightIDs = NULL;
cudaMalloc((void**)&cudalightIDs, numberOfLights*sizeof(int));
cudaMemcpy( cudalightIDs, lightIDs, numberOfLights*sizeof(int), cudaMemcpyHostToDevice);
//set up ray pool on device
ray* cudarays = NULL;
int numOfRays = cam.resolution.x * cam.resolution.y;
cudaMalloc((void**)&cudarays, numOfRays*sizeof(ray));
generateRaypool<<<fullBlocksPerGrid, threadsPerBlock>>>(cudarays, cam, (float)iterations, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cudatris);
for(int cur_depth=0; cur_depth<traceDepth && numOfRays>0; cur_depth++){
thrust::device_ptr<ray> raypoolStart = thrust::device_pointer_cast(cudarays); //converts raw CUDA pointer to a thrust pointer
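// stream compaction: remove terminated rays so later bounces only launch threads for rays that are still alive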
thrust::device_ptr<ray> raypoolEnd = thrust::remove_if(raypoolStart, raypoolStart + numOfRays, is_dead());
numOfRays = (int)(raypoolEnd-raypoolStart);
//xBlocks * yBlocks = numOfRays / (tileSize*tileSize)
int xBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) );
int yBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) );
dim3 newBlocksPerGrid(xBlocks,yBlocks);
pathtraceRay<<<newBlocksPerGrid, threadsPerBlock>>>(cudarays, (float)iterations, cur_depth, (int)numOfRays, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cam, cudatris);
}
//raytraceRay<<<newBlocksPerGrid, threadsPerBlock>>>(cudarays, (float)iterations, cur_depth, (int)numOfRays, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cam, cudatris);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage,(float)iterations);
// retrieve image from GPU
cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
// free up stuff, or else we'll leak memory like a madman
cudaFree( cudaimage );
cudaFree( cudageoms );
cudaFree( cudamats );
cudaFree( cudarays );
cudaFree( cudalightIDs );
if(meshID>-1){
cudaFree( cudatris );
}
delete[] geomList;
delete[] lightIDs;
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("Kernel failed!");
}
|
dd2ab03c1d64d4a34870a03cca6fc5aa3855d6eb.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <xgboost/logging.h>
#include <xgboost/learner.h>
#include <string>
#include "../helpers.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/data/device_adapter.cuh"
#include "test_predictor.h"
namespace xgboost {
namespace predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = CreateEmptyGenericParam(-1);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
LearnerModelParam param;
param.num_feature = n_col;
param.num_output_group = 1;
param.base_score = 0.5;
gbm::GBTreeModel model = CreateTestModel(¶m);
// Test predict batch
PredictionCacheEntry gpu_out_predictions;
PredictionCacheEntry cpu_out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0);
ASSERT_EQ(model.trees.size(), gpu_out_predictions.version);
cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
}
}
TEST(GPUPredictor, EllpackBasic) {
size_t constexpr kCols {8};
for (size_t bins = 2; bins < 258; bins += 16) {
size_t rows = bins * 16;
auto p_m = RandomDataGenerator{rows, kCols, 0.0}
.Bins(bins)
.Device(0)
.GenerateDeviceDMatrix(true);
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m);
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m);
}
}
TEST(GPUPredictor, EllpackTraining) {
size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 };
auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
.Bins(kBins)
.Device(0)
.GenerateDeviceDMatrix(true);
HostDeviceVector<float> storage(kRows * kCols);
auto columnar = RandomDataGenerator{kRows, kCols, 0.0}
.Device(0)
.GenerateArrayInterface(&storage);
auto adapter = data::CupyAdapter(columnar);
std::shared_ptr<DMatrix> p_full {
DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1)
};
TestTrainingPrediction(kRows, kBins, "gpu_hist", p_full, p_ellpack);
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam param;
param.num_feature = 5;
const int n_classes = 3;
param.num_output_group = n_classes;
param.base_score = 0.5;
gbm::GBTreeModel model = CreateTestModel(¶m, n_classes);
std::vector<std::unique_ptr<DMatrix>> dmats;
dmlc::TemporaryDirectory tmpdir;
std::string file0 = tmpdir.path + "/big_0.libsvm";
std::string file1 = tmpdir.path + "/big_1.libsvm";
std::string file2 = tmpdir.path + "/big_2.libsvm";
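// external-memory DMatrices of increasing size, each backed by a temporary libsvm file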
dmats.push_back(CreateSparsePageDMatrix(9, 64UL, file0));
dmats.push_back(CreateSparsePageDMatrix(128, 128UL, file1));
dmats.push_back(CreateSparsePageDMatrix(1024, 1024UL, file2));
for (const auto& dmat: dmats) {
dmat->Info().base_margin_.Resize(dmat->Info().num_row_ * n_classes, 0.5);
PredictionCacheEntry out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector();
for (int i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 2.0);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.5);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.5);
}
}
}
TEST(GPUPredictor, InplacePredictCupy) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, InplacePredictCuDF) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
std::vector<HostDeviceVector<float>> storage(kCols);
auto interface_str = gen.GenerateColumnarArrayInterface(&storage);
auto x = std::make_shared<data::CudfAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT
int32_t n_gpus = xgboost::common::AllVisibleGPUs();
if (n_gpus <= 1) {
LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";
return;
}
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(1);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 1);
EXPECT_THROW(TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0),
dmlc::Error);
}
TEST(GpuPredictor, LesserFeatures) {
TestPredictionWithLesserFeatures("gpu_predictor");
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
hipSetDevice(0);
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
gbm::GBTreeModel model(¶m);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
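// each row contributes (feature contribution, bias); for an empty stump the contribution is 0 and the bias equals base_score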
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], param.base_score);
EXPECT_EQ(phis[2], 0.0);
EXPECT_EQ(phis[3], param.base_score);
EXPECT_EQ(phis[4], 0.0);
EXPECT_EQ(phis[5], param.base_score);
}
TEST(GPUPredictor, Shap) {
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
gbm::GBTreeModel model(¶m);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = CreateEmptyGenericParam(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}
} // namespace predictor
} // namespace xgboost
| dd2ab03c1d64d4a34870a03cca6fc5aa3855d6eb.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <xgboost/logging.h>
#include <xgboost/learner.h>
#include <string>
#include "../helpers.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/data/device_adapter.cuh"
#include "test_predictor.h"
namespace xgboost {
namespace predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = CreateEmptyGenericParam(-1);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
LearnerModelParam param;
param.num_feature = n_col;
param.num_output_group = 1;
param.base_score = 0.5;
gbm::GBTreeModel model = CreateTestModel(¶m);
// Test predict batch
PredictionCacheEntry gpu_out_predictions;
PredictionCacheEntry cpu_out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0);
ASSERT_EQ(model.trees.size(), gpu_out_predictions.version);
cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
}
}
TEST(GPUPredictor, EllpackBasic) {
size_t constexpr kCols {8};
for (size_t bins = 2; bins < 258; bins += 16) {
size_t rows = bins * 16;
auto p_m = RandomDataGenerator{rows, kCols, 0.0}
.Bins(bins)
.Device(0)
.GenerateDeviceDMatrix(true);
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m);
TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m);
}
}
TEST(GPUPredictor, EllpackTraining) {
size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 };
auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
.Bins(kBins)
.Device(0)
.GenerateDeviceDMatrix(true);
HostDeviceVector<float> storage(kRows * kCols);
auto columnar = RandomDataGenerator{kRows, kCols, 0.0}
.Device(0)
.GenerateArrayInterface(&storage);
auto adapter = data::CupyAdapter(columnar);
std::shared_ptr<DMatrix> p_full {
DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1)
};
TestTrainingPrediction(kRows, kBins, "gpu_hist", p_full, p_ellpack);
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam param;
param.num_feature = 5;
const int n_classes = 3;
param.num_output_group = n_classes;
param.base_score = 0.5;
gbm::GBTreeModel model = CreateTestModel(¶m, n_classes);
std::vector<std::unique_ptr<DMatrix>> dmats;
dmlc::TemporaryDirectory tmpdir;
std::string file0 = tmpdir.path + "/big_0.libsvm";
std::string file1 = tmpdir.path + "/big_1.libsvm";
std::string file2 = tmpdir.path + "/big_2.libsvm";
dmats.push_back(CreateSparsePageDMatrix(9, 64UL, file0));
dmats.push_back(CreateSparsePageDMatrix(128, 128UL, file1));
dmats.push_back(CreateSparsePageDMatrix(1024, 1024UL, file2));
for (const auto& dmat: dmats) {
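// every row gets a base margin of 0.5 for each of the 3 classes before prediction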
dmat->Info().base_margin_.Resize(dmat->Info().num_row_ * n_classes, 0.5);
PredictionCacheEntry out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector();
for (int i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 2.0);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.5);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.5);
}
}
}
TEST(GPUPredictor, InplacePredictCupy) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, InplacePredictCuDF) {
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(0);
std::vector<HostDeviceVector<float>> storage(kCols);
auto interface_str = gen.GenerateColumnarArrayInterface(&storage);
auto x = std::make_shared<data::CudfAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT
int32_t n_gpus = xgboost::common::AllVisibleGPUs();
if (n_gpus <= 1) {
LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";
return;
}
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(1);
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
auto x = std::make_shared<data::CupyAdapter>(interface_str);
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 1);
EXPECT_THROW(TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0),
dmlc::Error);
}
TEST(GpuPredictor, LesserFeatures) {
TestPredictionWithLesserFeatures("gpu_predictor");
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
cudaSetDevice(0);
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
gbm::GBTreeModel model(¶m);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], param.base_score);
EXPECT_EQ(phis[2], 0.0);
EXPECT_EQ(phis[3], param.base_score);
EXPECT_EQ(phis[4], 0.0);
EXPECT_EQ(phis[5], param.base_score);
}
TEST(GPUPredictor, Shap) {
LearnerModelParam param;
param.num_feature = 1;
param.num_output_group = 1;
param.base_score = 0.5;
gbm::GBTreeModel model(¶m);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = CreateEmptyGenericParam(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}
} // namespace predictor
} // namespace xgboost
|
3828bc4151a5049c33b9cc938315032b31eebe5b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include "CudaSolver.h"
#include "cudaHelper.h"
#include "generalSimplex.h"
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
namespace solver {
CudaSolver::CudaSolver(const int num_vars, const int num_constrs,
const int device_id)
: CpuSolver(num_vars, num_constrs),
device_id_(device_id) {
// Device initializations
CHECK(hipSetDevice(0));
CHECK(hipGetDeviceProperties(&prop_, device_id));
const size_t sz_bounds = (ncols_ + nrows_) * sizeof(float);
memalloc(&d_lower_, sz_bounds);
memalloc(&d_upper_, sz_bounds);
memalloc(&d_assigns_, sz_bounds);
memalloc(&d_tableau_, nrows_ * ncols_ * sizeof(float));
memalloc(&d_tableau_row_, ncols_ * sizeof(float));
memalloc(&d_col_to_var_, ncols_ * sizeof(int));
CHECK(hipDeviceSynchronize());
}
CudaSolver::~CudaSolver() {
memfree(d_tableau_);
memfree(d_tableau_row_);
memfree(d_lower_);
memfree(d_upper_);
memfree(d_assigns_);
memfree(d_col_to_var_);
hipDeviceReset();
}
void CudaSolver::pre_solve() {
const size_t sz_bounds = (ncols_ + nrows_) * sizeof(float);
memcpyH2D(d_tableau_, tableau_, nrows_ * ncols_ * sizeof(float));
memcpyH2D(d_col_to_var_, col_to_var_, ncols_ * sizeof(int));
memcpyH2D(d_upper_, upper_, sz_bounds);
memcpyH2D(d_assigns_, assigns_, sz_bounds);
memcpyH2D(d_lower_, lower_, sz_bounds);
}
bool CudaSolver::find_suitable(const int broken_idx, int &suitable_idx) {
const size_t offset = OFFSET(var_to_tableau_[broken_idx], 0, ncols_);
memcpyD2H(&tableau_[offset], &d_tableau_[offset], ncols_ * sizeof(float));
bool result = CpuSolver::find_suitable(broken_idx, suitable_idx);
// Copy updated assignments to device
if (result == true) {
memcpyH2D(&d_assigns_[broken_idx], &assigns_[broken_idx], sizeof(float));
memcpyH2D(&d_assigns_[suitable_idx], &assigns_[suitable_idx],
sizeof(float));
}
return result;
}
void CudaSolver::pivot(const int broken_idx, const int suitable_idx) {
const int pivot_row = var_to_tableau_[broken_idx];
const int pivot_col = var_to_tableau_[suitable_idx];
//printf("Pivot(%d,%d)\n", pivot_row, pivot_col);
// Save the current pivot element (alpha)
const int alpha_idx = OFFSET(pivot_row, pivot_col, ncols_);
float alpha = tableau_[alpha_idx];
// Kernel configurations
const dim3 block_inner(32, 32, 1);
const dim3 grid_inner((nrows_ + 31) / 32, (ncols_ + 31) / 32, 1);
const dim3 block_row(256, 1, 1);
const dim3 grid_row((ncols_ + 255) / 256, 1, 1);
const dim3 block_column(256, 1, 1);
const dim3 grid_column((ncols_ + 255) / 256, 1, 1);
// Update the tableau_
hipLaunchKernelGGL(( device::pivot_update_inner), dim3(grid_inner), dim3(block_inner), 0, 0, alpha, pivot_row,
pivot_col, nrows_, ncols_, d_tableau_);
hipLaunchKernelGGL(( device::pivot_update_row), dim3(grid_row), dim3(block_row), 0, 0, alpha, pivot_row, ncols_,
d_tableau_);
hipLaunchKernelGGL(( device::pivot_update_column), dim3(grid_column), dim3(block_column), 0, 0, alpha, pivot_col,
nrows_, ncols_, d_tableau_);
// Update pivot element on the device
alpha = 1.0f / alpha;
memcpyH2D(&d_tableau_[alpha_idx], &alpha, sizeof(float));
// Swap the basic_ and nonbasic_ variables
swap(pivot_row, pivot_col, broken_idx, suitable_idx);
}
void CudaSolver::swap(const int row, const int col, const int basic_idx,
const int nonbasic_idx) {
CpuSolver::swap(row, col, basic_idx, nonbasic_idx);
// Update column to variable mapping on device
memcpyH2D(&d_col_to_var_[col], &col_to_var_[col], sizeof(int));
}
float CudaSolver::compute_assignment(const int idx) const {
// D2D copy of the current tableau_ row
const int rowIdx = var_to_tableau_[idx];
const size_t offset = rowIdx * ncols_;
memcpyD2D(d_tableau_row_, &d_tableau_[offset], ncols_ * sizeof(float));
// Run kernel that multiplies each element of the row by its respective variable assignment
const int n = ncols_;
const int block_size = 256;
const int nblocks = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( device::update_assignment_row_multiply), dim3(nblocks), dim3(block_size), 0, 0, ncols_,
d_col_to_var_, d_assigns_, d_tableau_row_);
// Perform a sum reduction of the row
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(
d_tableau_row_);
thrust::device_vector<float> vec(dev_ptr, dev_ptr + ncols_);
float val = thrust::reduce(vec.begin(), vec.begin() + ncols_);
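// val is the dot product of this tableau row with the current assignments of its column variables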
// Update the assignment
const_cast<CudaSolver*>(this)->assigns_[idx] = val;
const_cast<CudaSolver*>(this)->map_assigns_[idx] = get_step_count();
return val;
}
}
| 3828bc4151a5049c33b9cc938315032b31eebe5b.cu | #include <cassert>
#include <cuda_runtime.h>
#include "CudaSolver.h"
#include "cudaHelper.h"
#include "generalSimplex.h"
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
namespace solver {
CudaSolver::CudaSolver(const int num_vars, const int num_constrs,
const int device_id)
: CpuSolver(num_vars, num_constrs),
device_id_(device_id) {
// Device initializations
CHECK(cudaSetDevice(0));
CHECK(cudaGetDeviceProperties(&prop_, device_id));
const size_t sz_bounds = (ncols_ + nrows_) * sizeof(float);
memalloc(&d_lower_, sz_bounds);
memalloc(&d_upper_, sz_bounds);
memalloc(&d_assigns_, sz_bounds);
memalloc(&d_tableau_, nrows_ * ncols_ * sizeof(float));
memalloc(&d_tableau_row_, ncols_ * sizeof(float));
memalloc(&d_col_to_var_, ncols_ * sizeof(int));
CHECK(cudaDeviceSynchronize());
}
CudaSolver::~CudaSolver() {
memfree(d_tableau_);
memfree(d_tableau_row_);
memfree(d_lower_);
memfree(d_upper_);
memfree(d_assigns_);
memfree(d_col_to_var_);
cudaDeviceReset();
}
void CudaSolver::pre_solve() {
const size_t sz_bounds = (ncols_ + nrows_) * sizeof(float);
memcpyH2D(d_tableau_, tableau_, nrows_ * ncols_ * sizeof(float));
memcpyH2D(d_col_to_var_, col_to_var_, ncols_ * sizeof(int));
memcpyH2D(d_upper_, upper_, sz_bounds);
memcpyH2D(d_assigns_, assigns_, sz_bounds);
memcpyH2D(d_lower_, lower_, sz_bounds);
}
bool CudaSolver::find_suitable(const int broken_idx, int &suitable_idx) {
const size_t offset = OFFSET(var_to_tableau_[broken_idx], 0, ncols_);
memcpyD2H(&tableau_[offset], &d_tableau_[offset], ncols_ * sizeof(float));
bool result = CpuSolver::find_suitable(broken_idx, suitable_idx);
// Copy updated assignments to device
if (result == true) {
memcpyH2D(&d_assigns_[broken_idx], &assigns_[broken_idx], sizeof(float));
memcpyH2D(&d_assigns_[suitable_idx], &assigns_[suitable_idx],
sizeof(float));
}
return result;
}
void CudaSolver::pivot(const int broken_idx, const int suitable_idx) {
const int pivot_row = var_to_tableau_[broken_idx];
const int pivot_col = var_to_tableau_[suitable_idx];
//printf("Pivot(%d,%d)\n", pivot_row, pivot_col);
// Save the current pivot element (alpha)
const int alpha_idx = OFFSET(pivot_row, pivot_col, ncols_);
float alpha = tableau_[alpha_idx];
// Kernel configurations
const dim3 block_inner(32, 32, 1);
const dim3 grid_inner((nrows_ + 31) / 32, (ncols_ + 31) / 32, 1);
const dim3 block_row(256, 1, 1);
const dim3 grid_row((ncols_ + 255) / 256, 1, 1);
const dim3 block_column(256, 1, 1);
const dim3 grid_column((ncols_ + 255) / 256, 1, 1);
// Update the tableau_
device::pivot_update_inner<<<grid_inner, block_inner>>>(alpha, pivot_row,
pivot_col, nrows_, ncols_, d_tableau_);
device::pivot_update_row<<<grid_row, block_row>>>(alpha, pivot_row, ncols_,
d_tableau_);
device::pivot_update_column<<<grid_column, block_column>>>(alpha, pivot_col,
nrows_, ncols_, d_tableau_);
// Update pivot element on the device
alpha = 1.0f / alpha;
memcpyH2D(&d_tableau_[alpha_idx], &alpha, sizeof(float));
// Swap the basic_ and nonbasic_ variables
swap(pivot_row, pivot_col, broken_idx, suitable_idx);
}
void CudaSolver::swap(const int row, const int col, const int basic_idx,
const int nonbasic_idx) {
CpuSolver::swap(row, col, basic_idx, nonbasic_idx);
// Update column to variable mapping on device
memcpyH2D(&d_col_to_var_[col], &col_to_var_[col], sizeof(int));
}
float CudaSolver::compute_assignment(const int idx) const {
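// The assignment of a basic variable is the dot product of its tableau row
// with the current variable assignments: multiply element-wise on the device,
// then sum-reduce the row with thrust.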
// D2D copy of the current tableau_ row
const int rowIdx = var_to_tableau_[idx];
const size_t offset = rowIdx * ncols_;
memcpyD2D(d_tableau_row_, &d_tableau_[offset], ncols_ * sizeof(float));
// Run kernel that multiplies each element of the row by its respective variable assignment
const int n = ncols_;
const int block_size = 256;
const int nblocks = (n + block_size - 1) / block_size;
device::update_assignment_row_multiply<<<nblocks, block_size>>>(ncols_,
d_col_to_var_, d_assigns_, d_tableau_row_);
// Perform a sum reduction of the row
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(
d_tableau_row_);
thrust::device_vector<float> vec(dev_ptr, dev_ptr + ncols_);
float val = thrust::reduce(vec.begin(), vec.begin() + ncols_);
// Update the assignment
const_cast<CudaSolver*>(this)->assigns_[idx] = val;
const_cast<CudaSolver*>(this)->map_assigns_[idx] = get_step_count();
return val;
}
}
|
6bf2be2d27f890b6fa72665d2b7fcb9545a3acf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// https://www.slideserve.com/lars/3d-simulation-of-particle-motion-in-lid-driven-cavity-flow-by-mrt-lbm
#include "helper_cuda.h"
#include "helper_math.h"
#include <cassert>
#include <cstdio>
#include <cmath>
#include <GL/glut.h>
template <int NX, int NY, int NZ, class T>
struct volume {
T *grid;
void allocate() {
size_t size = NX * NY * NZ;
checkCudaErrors(hipMallocManaged(&grid, size * sizeof(T)));
}
void free() {
checkCudaErrors(hipFree(grid));
}
__host__ __device__ T &at(int i, int j, int k) const {
return grid[i + j * NX + k * NX * NY];
}
__host__ __device__ auto &at(int c, int i, int j, int k) const {
return at(i, j, k)[c];
}
};
template <int NX, int NY, int NZ, class T, int N>
struct volume_soa {
T *grid;
void allocate() {
size_t size = NX * NY * NZ * N;
checkCudaErrors(hipMallocManaged(&grid, size * sizeof(T)));
}
void free() {
checkCudaErrors(hipFree(grid));
}
__host__ __device__ T &at(int c, int i, int j, int k) const {
return grid[i + j * NX + k * NX * NY + c * NX * NY * NZ];
}
};
#define GSL(_, start, end) \
int _ = (start) + blockDim._ * blockIdx._ + threadIdx._; \
_ < (end); _ += blockDim._ * gridDim._
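// D3Q15 lattice: one rest velocity, six face neighbors and eight corner
// neighbors, with the usual weights 2/9, 1/9 and 1/72.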
static inline __constant__ const int directions[][3] = {{0,0,0},{1,0,0},{-1,0,0},{0,1,0},{0,-1,0},{0,0,1},{0,0,-1},{1,1,1},{-1,-1,-1},{1,1,-1},{-1,-1,1},{1,-1,1},{-1,1,-1},{-1,1,1},{1,-1,-1}};
static inline __constant__ const float weights[] = {2.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f,1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f};
[[maybe_unused]] static inline __constant__ const int inverse_index[] = {0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13};
static_assert(sizeof(weights) / sizeof(weights[0]) == 15);
static inline const float niu = 0.005f;
static inline const float tau = 3.f * niu + 0.5f;
static inline const float inv_tau = 1.f / tau;
template <int NX, int NY, int NZ>
struct LBM {
volume<NX, NY, NZ, float4> vel;
volume_soa<NX, NY, NZ, float, 16> f_new;
volume_soa<NX, NY, NZ, float, 16> f_old;
void allocate() {
vel.allocate();
f_new.allocate();
f_old.allocate();
}
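// Second-order equilibrium distribution,
// f_eq = w_q * rho * (1 + 3 e.u + 4.5 (e.u)^2 - 1.5 u.u),
// with the density stored in vel.w and the velocity in vel.xyz.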
__device__ float f_eq(int q, int x, int y, int z) {
float4 v = vel.at(x, y, z);
float eu = v.x * directions[q][0]
+ v.y * directions[q][1] + v.z * directions[q][2];
float uv = v.x * v.x + v.y * v.y + v.z * v.z;
float term = 1.f + 3.f * eu + 4.5f * eu * eu - 1.5f * uv;
float feq = weights[q] * v.w * term;
return feq;
}
};
template <int NX, int NY, int NZ>
__global__ void initialize1(LBM<NX, NY, NZ> lbm, int type) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
lbm.vel.at(x, y, z) = make_float4(0.f, 0.f, 0.f, 1.f);
}
}
template <int NX, int NY, int NZ>
__global__ void initialize2(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
for (int q = 0; q < 15; q++) {
float f = lbm.f_eq(q, x, y, z);
lbm.f_new.at(q, x, y, z) = f;
lbm.f_old.at(q, x, y, z) = f;
}
}
}
template <int NX, int NY, int NZ>
void initialize(LBM<NX, NY, NZ> lbm, int type) {
hipLaunchKernelGGL(( initialize1), dim3(dim3(NX / 8, NY / 8, NZ / 8)), dim3(dim3(8, 8, 8)), 0, 0, lbm, type);
hipLaunchKernelGGL(( initialize2), dim3(dim3(NX / 8, NY / 8, NZ / 8)), dim3(dim3(8, 8, 8)), 0, 0, lbm);
}
template <int NX, int NY, int NZ, class T>
__device__ auto trilerp(T const &t, int x, int y, int z, int dx, int dy, int dz) {
float c0 = 0.85f;
float c1 = 1.f - c0;
float x0 = dx ? c1 : c0;
float x1 = dx ? c0 : c1;
float y0 = dy ? c1 : c0;
float y1 = dy ? c0 : c1;
float z0 = dz ? c1 : c0;
float z1 = dz ? c0 : c1;
int x_ = x, y_ = y, z_ = z;
if (x_ < NX - 1) x_++;
if (y_ < NY - 1) y_++;
if (z_ < NZ - 1) z_++;
if (!dx && x > 0) x--;
if (!dy && y > 0) y--;
if (!dz && z > 0) z--;
return x0 * y0 * z0 * t(x, y, z)
+ x0 * y0 * z1 * t(x, y, z_)
+ x0 * y1 * z0 * t(x, y_, z)
+ x0 * y1 * z1 * t(x, y_, z_)
+ x1 * y0 * z0 * t(x_, y, z)
+ x1 * y0 * z1 * t(x_, y, z_)
+ x1 * y1 * z0 * t(x_, y_, z)
+ x1 * y1 * z1 * t(x_, y_, z_);
}
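// Streaming and collision in a single pull-scheme pass: each cell gathers the
// distribution of its upstream neighbor (with periodic wrap-around) and
// relaxes it toward equilibrium at rate 1/tau (BGK collision).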
template <int NX, int NY, int NZ>
__global__ void substep1(LBM<NX, NY, NZ> lbm) {
//for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) for (GSL(x, 1, NX - 1)) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
for (int q = 0; q < 15; q++) {
//int mdx = x - directions[q][0];
//int mdy = y - directions[q][1];
//int mdz = z - directions[q][2];
int mdx = (x - directions[q][0] + NX) % NX;
int mdy = (y - directions[q][1] + NY) % NY;
int mdz = (z - directions[q][2] + NZ) % NZ;
lbm.f_new.at(q, x, y, z) = lbm.f_old.at(q, mdx, mdy, mdz)
* (1.f - inv_tau) + lbm.f_eq(q, mdx, mdy, mdz) * inv_tau;
}
}
}
/*__global__ void substep11(LBM lbm) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
self.feq
}
}
__global__ void substep12(LBM lbm) {
//for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) for (GSL(x, 1, NX - 1)) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
for (int q = 0; q < 15; q++) {
//int mdx = x - directions[q][0];
//int mdy = y - directions[q][1];
//int mdz = z - directions[q][2];
int mdx = (x - directions[q][0] + NX) % NX;
int mdy = (y - directions[q][1] + NY) % NY;
int mdz = (z - directions[q][2] + NZ) % NZ;
[[maybe_unused]] int iq = inverse_index[q];
lbm.f_new.at(q, x, y, z) = lbm.f_old.at(q, mdx, mdy, mdz);
//lbm.f_new.at(q, x, y, z) = lbm.f_old.at(q, mdx, mdy, mdz)
// * (1.f - inv_tau) + lbm.f_eq(q, mdx, mdy, mdz) * inv_tau;
}
}
}*/
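// Moment update: density is the sum of the 15 distributions, velocity is the
// momentum sum divided by the density (clamped away from zero), and the result
// is packed into vel as (vx, vy, vz, rho).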
template <int NX, int NY, int NZ>
__global__ void substep2(LBM<NX, NY, NZ> lbm) {
//for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) for (GSL(x, 1, NX - 1)) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
float m = 0.f;
float vx = 0.f, vy = 0.f, vz = 0.f;
for (int q = 0; q < 15; q++) {
float f = lbm.f_new.at(q, x, y, z);
lbm.f_old.at(q, x, y, z) = f;
vx += f * directions[q][0];
vy += f * directions[q][1];
vz += f * directions[q][2];
m += f;
}
float mscale = 1.f / fmaxf(m, 1e-6f);
vx *= mscale; vy *= mscale; vz *= mscale;
lbm.vel.at(x, y, z) = make_float4(vx, vy, vz, m);
}
}
//__device__ void applybccore(LBM lbm, at
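// Boundary handling below uses non-equilibrium extrapolation: a boundary cell
// takes the imposed macroscopic velocity plus its neighbor's non-equilibrium
// part, f_eq(boundary) - f_eq(neighbor) + f_old(neighbor). The x=0 face is a
// 0.15 inflow along +x, the y/z faces are held at zero velocity, the x=NX-1
// face copies its neighbor (outflow-like), and applybc4 zeroes the velocity
// inside a small spherical region (a solid obstacle).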
template <int NX, int NY, int NZ>
__global__ void applybc1(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) {
//for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) {
lbm.vel.at(0, y, z) = lbm.vel.at(1, y, z);
lbm.vel.at(0, y, z).x = 0.15f;
lbm.vel.at(0, y, z).y = 0.f;
lbm.vel.at(0, y, z).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, 0, y, z) = lbm.f_eq(q, 0, y, z) - lbm.f_eq(q, 1, y, z) + lbm.f_old.at(q, 1, y, z);
}
lbm.vel.at(NX - 1, y, z) = lbm.vel.at(NX - 2, y, z);
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, NX - 1, y, z) = lbm.f_eq(q, NX - 1, y, z) - lbm.f_eq(q, NX - 2, y, z) + lbm.f_old.at(q, NX - 2, y, z);
}
}
}
template <int NX, int NY, int NZ>
__global__ void applybc2(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 0, NZ)) for (GSL(x, 0, NX)) {
lbm.vel.at(x, 0, z) = lbm.vel.at(x, 1, z);
lbm.vel.at(x, 0, z).x = 0.f;
lbm.vel.at(x, 0, z).y = 0.f;
lbm.vel.at(x, 0, z).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, 0, z) = lbm.f_eq(q, x, 0, z) - lbm.f_eq(q, x, 1, z) + lbm.f_old.at(q, x, 1, z);
}
lbm.vel.at(x, NY - 1, z) = lbm.vel.at(x, NY - 2, z);
lbm.vel.at(x, NY - 1, z).x = 0.f;
lbm.vel.at(x, NY - 1, z).y = 0.f;
lbm.vel.at(x, NY - 1, z).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, NY - 1, z) = lbm.f_eq(q, x, NY - 1, z) - lbm.f_eq(q, x, NY - 2, z) + lbm.f_old.at(q, x, NY - 2, z);
}
}
}
template <int NX, int NY, int NZ>
__global__ void applybc3(LBM<NX, NY, NZ> lbm) {
for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
lbm.vel.at(x, y, 0) = lbm.vel.at(x, y, 1);
lbm.vel.at(x, y, 0).x = 0.f;
lbm.vel.at(x, y, 0).y = 0.f;
lbm.vel.at(x, y, 0).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, y, 0) = lbm.f_eq(q, x, y, 0) - lbm.f_eq(q, x, y, 1) + lbm.f_old.at(q, x, y, 1);
}
lbm.vel.at(x, y, NZ - 1) = lbm.vel.at(x, y, NZ - 2);
lbm.vel.at(x, y, NZ - 1).x = 0.f;
lbm.vel.at(x, y, NZ - 1).y = 0.f;
lbm.vel.at(x, y, NZ - 1).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, y, NZ - 1) = lbm.f_eq(q, x, y, NZ - 1) - lbm.f_eq(q, x, y, NZ - 2) + lbm.f_old.at(q, x, y, NZ - 2);
}
}
}
template <int NX, int NY, int NZ>
__global__ void applybc4(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
float fx = x * 2.f / NY - 1.f;
float fy = y * 2.f / NY - 1.f;
float fz = z * 2.f / NZ - 1.f;
if (fx * fx + fy * fy + fz * fz >= .065f) {
continue;
}
lbm.vel.at(x, y, z).x = 0.f;
lbm.vel.at(x, y, z).y = 0.f;
lbm.vel.at(x, y, z).z = 0.f;
}
}
template <int NX, int NY, int NZ>
void substep(LBM<NX, NY, NZ> lbm) {
hipLaunchKernelGGL(( substep1), dim3(dim3(NX / 8, NY / 8, NZ / 8)), dim3(dim3(8, 8, 8)), 0, 0, lbm);
hipLaunchKernelGGL(( substep2), dim3(dim3(NX / 8, NY / 8, NZ / 8)), dim3(dim3(8, 8, 8)), 0, 0, lbm);
hipLaunchKernelGGL(( applybc1), dim3(dim3(1, NY / 16, NZ / 16)), dim3(dim3(1, 16, 16)), 0, 0, lbm);
hipLaunchKernelGGL(( applybc2), dim3(dim3(NX / 16, 1, NZ / 16)), dim3(dim3(16, 1, 16)), 0, 0, lbm);
hipLaunchKernelGGL(( applybc3), dim3(dim3(NX / 16, NY / 16, 1)), dim3(dim3(16, 16, 1)), 0, 0, lbm);
hipLaunchKernelGGL(( applybc4), dim3(dim3(NX / 16, NY / 16, NZ / 16)), dim3(dim3(8, 8, 8)), 0, 0, lbm);
}
#define NNX 512
#define NNY 128
#define NNZ 128
template <int NX, int NY, int NZ>
__global__ void render1(float *pixels, LBM<NX, NY, NZ> lbm) {
for (GSL(y, 0, NNY)) for (GSL(x, 0, NNX)) {
float4 v = trilerp<NX, NY, NZ>([&] (auto x, auto y, auto z) {
return lbm.vel.at(x, y, z);
}, x * NX / NNX, y * NY / NNY, NZ / 2, x * (NNX / NX), y % (NNY / NY), 0);
//float val = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
float val = 4.f * sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
//float val = v.x * 4.f;
//float val = v.w * 0.5f;
pixels[y * NNX + x] = val;
}
}
template <int NX, int NY, int NZ>
void render(float *pixels, LBM<NX, NY, NZ> lbm) {
hipLaunchKernelGGL(( render1), dim3(dim3(NNX / 16, NNY / 16, 1)), dim3(dim3(16, 16, 1)), 0, 0, pixels, lbm);
}
LBM<NNX/2, NNY/2, NNZ/2> lbm;
float *pixels;
void initFunc() {
checkCudaErrors(hipMallocManaged(&pixels, NNX * NNY * sizeof(float)));
lbm.allocate();
initialize(lbm, 0);
}
void renderFunc() {
substep(lbm);
render(pixels, lbm);
checkCudaErrors(hipDeviceSynchronize());
}
void displayFunc() {
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(NNX, NNY, GL_RED, GL_FLOAT, pixels);
glFlush();
}
#define ITV 0
void timerFunc(int unused) {
renderFunc();
glutPostRedisplay();
glutTimerFunc(ITV, timerFunc, 0);
}
void keyboardFunc(unsigned char key, int x, int y) {
if (key == 27)
exit(0);
}
int main(int argc, char **argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_SINGLE | GLUT_RGBA);
glutInitWindowPosition(100, 100);
glutInitWindowSize(NNX, NNY);
glutCreateWindow("GLUT Window");
glutDisplayFunc(displayFunc);
glutKeyboardFunc(keyboardFunc);
initFunc();
renderFunc();
glutTimerFunc(ITV, timerFunc, 0);
glutMainLoop();
}
| 6bf2be2d27f890b6fa72665d2b7fcb9545a3acf8.cu | // https://www.slideserve.com/lars/3d-simulation-of-particle-motion-in-lid-driven-cavity-flow-by-mrt-lbm
#include "helper_cuda.h"
#include "helper_math.h"
#include <cassert>
#include <cstdio>
#include <cmath>
#include <GL/glut.h>
template <int NX, int NY, int NZ, class T>
struct volume {
T *grid;
void allocate() {
size_t size = NX * NY * NZ;
checkCudaErrors(cudaMallocManaged(&grid, size * sizeof(T)));
}
void free() {
checkCudaErrors(cudaFree(grid));
}
__host__ __device__ T &at(int i, int j, int k) const {
return grid[i + j * NX + k * NX * NY];
}
__host__ __device__ auto &at(int c, int i, int j, int k) const {
return at(i, j, k)[c];
}
};
template <int NX, int NY, int NZ, class T, int N>
struct volume_soa {
T *grid;
void allocate() {
size_t size = NX * NY * NZ * N;
checkCudaErrors(cudaMallocManaged(&grid, size * sizeof(T)));
}
void free() {
checkCudaErrors(cudaFree(grid));
}
__host__ __device__ T &at(int c, int i, int j, int k) const {
return grid[i + j * NX + k * NX * NY + c * NX * NY * NZ];
}
};
#define GSL(_, start, end) \
int _ = (start) + blockDim._ * blockIdx._ + threadIdx._; \
_ < (end); _ += blockDim._ * gridDim._
static inline __constant__ const int directions[][3] = {{0,0,0},{1,0,0},{-1,0,0},{0,1,0},{0,-1,0},{0,0,1},{0,0,-1},{1,1,1},{-1,-1,-1},{1,1,-1},{-1,-1,1},{1,-1,1},{-1,1,-1},{-1,1,1},{1,-1,-1}};
static inline __constant__ const float weights[] = {2.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f, 1.f/9.f,1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f, 1.f/72.f};
[[maybe_unused]] static inline __constant__ const int inverse_index[] = {0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13};
static_assert(sizeof(weights) / sizeof(weights[0]) == 15);
static inline const float niu = 0.005f;
static inline const float tau = 3.f * niu + 0.5f;
static inline const float inv_tau = 1.f / tau;
template <int NX, int NY, int NZ>
struct LBM {
volume<NX, NY, NZ, float4> vel;
volume_soa<NX, NY, NZ, float, 16> f_new;
volume_soa<NX, NY, NZ, float, 16> f_old;
void allocate() {
vel.allocate();
f_new.allocate();
f_old.allocate();
}
__device__ float f_eq(int q, int x, int y, int z) {
float4 v = vel.at(x, y, z);
float eu = v.x * directions[q][0]
+ v.y * directions[q][1] + v.z * directions[q][2];
float uv = v.x * v.x + v.y * v.y + v.z * v.z;
float term = 1.f + 3.f * eu + 4.5f * eu * eu - 1.5f * uv;
float feq = weights[q] * v.w * term;
return feq;
}
};
template <int NX, int NY, int NZ>
__global__ void initialize1(LBM<NX, NY, NZ> lbm, int type) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
lbm.vel.at(x, y, z) = make_float4(0.f, 0.f, 0.f, 1.f);
}
}
template <int NX, int NY, int NZ>
__global__ void initialize2(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
for (int q = 0; q < 15; q++) {
float f = lbm.f_eq(q, x, y, z);
lbm.f_new.at(q, x, y, z) = f;
lbm.f_old.at(q, x, y, z) = f;
}
}
}
template <int NX, int NY, int NZ>
void initialize(LBM<NX, NY, NZ> lbm, int type) {
initialize1<<<dim3(NX / 8, NY / 8, NZ / 8), dim3(8, 8, 8)>>>(lbm, type);
initialize2<<<dim3(NX / 8, NY / 8, NZ / 8), dim3(8, 8, 8)>>>(lbm);
}
template <int NX, int NY, int NZ, class T>
__device__ auto trilerp(T const &t, int x, int y, int z, int dx, int dy, int dz) {
float c0 = 0.85f;
float c1 = 1.f - c0;
float x0 = dx ? c1 : c0;
float x1 = dx ? c0 : c1;
float y0 = dy ? c1 : c0;
float y1 = dy ? c0 : c1;
float z0 = dz ? c1 : c0;
float z1 = dz ? c0 : c1;
int x_ = x, y_ = y, z_ = z;
if (x_ < NX - 1) x_++;
if (y_ < NY - 1) y_++;
if (z_ < NZ - 1) z_++;
if (!dx && x > 0) x--;
if (!dy && y > 0) y--;
if (!dz && z > 0) z--;
return x0 * y0 * z0 * t(x, y, z)
+ x0 * y0 * z1 * t(x, y, z_)
+ x0 * y1 * z0 * t(x, y_, z)
+ x0 * y1 * z1 * t(x, y_, z_)
+ x1 * y0 * z0 * t(x_, y, z)
+ x1 * y0 * z1 * t(x_, y, z_)
+ x1 * y1 * z0 * t(x_, y_, z)
+ x1 * y1 * z1 * t(x_, y_, z_);
}
template <int NX, int NY, int NZ>
__global__ void substep1(LBM<NX, NY, NZ> lbm) {
//for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) for (GSL(x, 1, NX - 1)) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
for (int q = 0; q < 15; q++) {
//int mdx = x - directions[q][0];
//int mdy = y - directions[q][1];
//int mdz = z - directions[q][2];
int mdx = (x - directions[q][0] + NX) % NX;
int mdy = (y - directions[q][1] + NY) % NY;
int mdz = (z - directions[q][2] + NZ) % NZ;
lbm.f_new.at(q, x, y, z) = lbm.f_old.at(q, mdx, mdy, mdz)
* (1.f - inv_tau) + lbm.f_eq(q, mdx, mdy, mdz) * inv_tau;
}
}
}
/*__global__ void substep11(LBM lbm) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
self.feq
}
}
__global__ void substep12(LBM lbm) {
//for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) for (GSL(x, 1, NX - 1)) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
for (int q = 0; q < 15; q++) {
//int mdx = x - directions[q][0];
//int mdy = y - directions[q][1];
//int mdz = z - directions[q][2];
int mdx = (x - directions[q][0] + NX) % NX;
int mdy = (y - directions[q][1] + NY) % NY;
int mdz = (z - directions[q][2] + NZ) % NZ;
[[maybe_unused]] int iq = inverse_index[q];
lbm.f_new.at(q, x, y, z) = lbm.f_old.at(q, mdx, mdy, mdz);
//lbm.f_new.at(q, x, y, z) = lbm.f_old.at(q, mdx, mdy, mdz)
// * (1.f - inv_tau) + lbm.f_eq(q, mdx, mdy, mdz) * inv_tau;
}
}
}*/
template <int NX, int NY, int NZ>
__global__ void substep2(LBM<NX, NY, NZ> lbm) {
//for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) for (GSL(x, 1, NX - 1)) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
float m = 0.f;
float vx = 0.f, vy = 0.f, vz = 0.f;
for (int q = 0; q < 15; q++) {
float f = lbm.f_new.at(q, x, y, z);
lbm.f_old.at(q, x, y, z) = f;
vx += f * directions[q][0];
vy += f * directions[q][1];
vz += f * directions[q][2];
m += f;
}
float mscale = 1.f / fmaxf(m, 1e-6f);
vx *= mscale; vy *= mscale; vz *= mscale;
lbm.vel.at(x, y, z) = make_float4(vx, vy, vz, m);
}
}
//__device__ void applybccore(LBM lbm, at
template <int NX, int NY, int NZ>
__global__ void applybc1(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 1, NZ - 1)) for (GSL(y, 1, NY - 1)) {
//for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) {
lbm.vel.at(0, y, z) = lbm.vel.at(1, y, z);
lbm.vel.at(0, y, z).x = 0.15f;
lbm.vel.at(0, y, z).y = 0.f;
lbm.vel.at(0, y, z).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, 0, y, z) = lbm.f_eq(q, 0, y, z) - lbm.f_eq(q, 1, y, z) + lbm.f_old.at(q, 1, y, z);
}
lbm.vel.at(NX - 1, y, z) = lbm.vel.at(NX - 2, y, z);
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, NX - 1, y, z) = lbm.f_eq(q, NX - 1, y, z) - lbm.f_eq(q, NX - 2, y, z) + lbm.f_old.at(q, NX - 2, y, z);
}
}
}
template <int NX, int NY, int NZ>
__global__ void applybc2(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 0, NZ)) for (GSL(x, 0, NX)) {
lbm.vel.at(x, 0, z) = lbm.vel.at(x, 1, z);
lbm.vel.at(x, 0, z).x = 0.f;
lbm.vel.at(x, 0, z).y = 0.f;
lbm.vel.at(x, 0, z).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, 0, z) = lbm.f_eq(q, x, 0, z) - lbm.f_eq(q, x, 1, z) + lbm.f_old.at(q, x, 1, z);
}
lbm.vel.at(x, NY - 1, z) = lbm.vel.at(x, NY - 2, z);
lbm.vel.at(x, NY - 1, z).x = 0.f;
lbm.vel.at(x, NY - 1, z).y = 0.f;
lbm.vel.at(x, NY - 1, z).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, NY - 1, z) = lbm.f_eq(q, x, NY - 1, z) - lbm.f_eq(q, x, NY - 2, z) + lbm.f_old.at(q, x, NY - 2, z);
}
}
}
template <int NX, int NY, int NZ>
__global__ void applybc3(LBM<NX, NY, NZ> lbm) {
for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
lbm.vel.at(x, y, 0) = lbm.vel.at(x, y, 1);
lbm.vel.at(x, y, 0).x = 0.f;
lbm.vel.at(x, y, 0).y = 0.f;
lbm.vel.at(x, y, 0).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, y, 0) = lbm.f_eq(q, x, y, 0) - lbm.f_eq(q, x, y, 1) + lbm.f_old.at(q, x, y, 1);
}
lbm.vel.at(x, y, NZ - 1) = lbm.vel.at(x, y, NZ - 2);
lbm.vel.at(x, y, NZ - 1).x = 0.f;
lbm.vel.at(x, y, NZ - 1).y = 0.f;
lbm.vel.at(x, y, NZ - 1).z = 0.f;
for (int q = 0; q < 15; q++) {
lbm.f_old.at(q, x, y, NZ - 1) = lbm.f_eq(q, x, y, NZ - 1) - lbm.f_eq(q, x, y, NZ - 2) + lbm.f_old.at(q, x, y, NZ - 2);
}
}
}
template <int NX, int NY, int NZ>
__global__ void applybc4(LBM<NX, NY, NZ> lbm) {
for (GSL(z, 0, NZ)) for (GSL(y, 0, NY)) for (GSL(x, 0, NX)) {
float fx = x * 2.f / NY - 1.f;
float fy = y * 2.f / NY - 1.f;
float fz = z * 2.f / NZ - 1.f;
if (fx * fx + fy * fy + fz * fz >= .065f) {
continue;
}
lbm.vel.at(x, y, z).x = 0.f;
lbm.vel.at(x, y, z).y = 0.f;
lbm.vel.at(x, y, z).z = 0.f;
}
}
template <int NX, int NY, int NZ>
void substep(LBM<NX, NY, NZ> lbm) {
substep1<<<dim3(NX / 8, NY / 8, NZ / 8), dim3(8, 8, 8)>>>(lbm);
substep2<<<dim3(NX / 8, NY / 8, NZ / 8), dim3(8, 8, 8)>>>(lbm);
applybc1<<<dim3(1, NY / 16, NZ / 16), dim3(1, 16, 16)>>>(lbm);
applybc2<<<dim3(NX / 16, 1, NZ / 16), dim3(16, 1, 16)>>>(lbm);
applybc3<<<dim3(NX / 16, NY / 16, 1), dim3(16, 16, 1)>>>(lbm);
applybc4<<<dim3(NX / 16, NY / 16, NZ / 16), dim3(8, 8, 8)>>>(lbm);
}
#define NNX 512
#define NNY 128
#define NNZ 128
template <int NX, int NY, int NZ>
__global__ void render1(float *pixels, LBM<NX, NY, NZ> lbm) {
for (GSL(y, 0, NNY)) for (GSL(x, 0, NNX)) {
float4 v = trilerp<NX, NY, NZ>([&] (auto x, auto y, auto z) {
return lbm.vel.at(x, y, z);
}, x * NX / NNX, y * NY / NNY, NZ / 2, x * (NNX / NX), y % (NNY / NY), 0);
//float val = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
float val = 4.f * sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
//float val = v.x * 4.f;
//float val = v.w * 0.5f;
pixels[y * NNX + x] = val;
}
}
template <int NX, int NY, int NZ>
void render(float *pixels, LBM<NX, NY, NZ> lbm) {
render1<<<dim3(NNX / 16, NNY / 16, 1), dim3(16, 16, 1)>>>(pixels, lbm);
}
LBM<NNX/2, NNY/2, NNZ/2> lbm;
float *pixels;
void initFunc() {
checkCudaErrors(cudaMallocManaged(&pixels, NNX * NNY * sizeof(float)));
lbm.allocate();
initialize(lbm, 0);
}
void renderFunc() {
substep(lbm);
render(pixels, lbm);
checkCudaErrors(cudaDeviceSynchronize());
}
void displayFunc() {
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(NNX, NNY, GL_RED, GL_FLOAT, pixels);
glFlush();
}
#define ITV 0
void timerFunc(int unused) {
renderFunc();
glutPostRedisplay();
glutTimerFunc(ITV, timerFunc, 0);
}
void keyboardFunc(unsigned char key, int x, int y) {
if (key == 27)
exit(0);
}
int main(int argc, char **argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_SINGLE | GLUT_RGBA);
glutInitWindowPosition(100, 100);
glutInitWindowSize(NNX, NNY);
glutCreateWindow("GLUT Window");
glutDisplayFunc(displayFunc);
glutKeyboardFunc(keyboardFunc);
initFunc();
renderFunc();
glutTimerFunc(ITV, timerFunc, 0);
glutMainLoop();
}
|
92259ff15429f6d27c3ee29724050cf8bf2f621c.hip | // !!! This is a file automatically generated by hipify!!!
// L1000 peak deconvolution based on Bayesian analysis
//
// Copyright 2019 Tianhuan Lu
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <vector>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "dpeak.h"
// maximum number of reads per bead color
#define CUDA_PROBPEAK_STRIDE 512
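// Peak shape: effectively a Student-t density with 3 degrees of freedom
// centered on the candidate peak position and scaled by the non-background
// weight bg1m; PEAK_WIDTH_MODEL (presumably supplied by dpeak.h) maps the
// peak mean to its width.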
__global__ void probpeak_kernel(const float bg1m, const float* values, const float* peakgrid, float* probpeak)
{
int g = blockIdx.x;
int i = threadIdx.x;
float mean = peakgrid[g];
float dev = PEAK_WIDTH_MODEL(mean);
float inv_dev = 1.0f / dev;
float prob = (values[i] - mean) * inv_dev;
probpeak[g * CUDA_PROBPEAK_STRIDE + i] = bg1m * 3.30797f * powf(3.0f + prob * prob, -2.0f) * inv_dev;
}
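// One block per (dp52, dp53) candidate peak pair; each thread handles one
// read. Shared memory accumulates, per read, log(base) together with the first
// and second powers of the scaled difference term (dp52prob - dp53prob)/base,
// then folds the three sums with a power-of-two tree reduction (the first
// step absorbs the non-power-of-two tail).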
__global__ void prob_kernel(const unsigned int num_values, unsigned int n_reduction,
const float inv_num_values, const unsigned int grid_size,
const float dp52_ratio, const float dp53_ratio,
const float* probpeak, const float* probbg, float* intparams)
{
int e52 = blockIdx.x;
int e53 = blockIdx.y;
int r = threadIdx.x;
const float* dp52prob = probpeak + (e52 * CUDA_PROBPEAK_STRIDE);
const float* dp53prob = probpeak + (e53 * CUDA_PROBPEAK_STRIDE);
float* totalparams = intparams + (e52 * grid_size + e53) * 3;
extern __shared__ float params[];
float base = dp52_ratio * dp52prob[r] + dp53_ratio * dp53prob[r] + probbg[r];
float diff = inv_num_values * (dp52prob[r] - dp53prob[r]) / base;
params[r * 3 + 0] = __logf(base);
params[r * 3 + 1] = diff;
params[r * 3 + 2] = diff * diff;
__syncthreads();
if (r < int(num_values - n_reduction))
{
params[r * 3 + 0] += params[(r + n_reduction) * 3 + 0];
params[r * 3 + 1] += params[(r + n_reduction) * 3 + 1];
params[r * 3 + 2] += params[(r + n_reduction) * 3 + 2];
}
__syncthreads();
n_reduction >>= 1;
for (; n_reduction > 0; n_reduction >>= 1)
{
if (r < n_reduction)
{
params[r * 3 + 0] += params[(r + n_reduction) * 3 + 0];
params[r * 3 + 1] += params[(r + n_reduction) * 3 + 1];
params[r * 3 + 2] += params[(r + n_reduction) * 3 + 2];
}
__syncthreads();
}
if (r == 0)
{
totalparams[0] = params[0];
totalparams[1] = params[1];
totalparams[2] = params[2];
}
}
__global__ void reduce_kernel(const float binomial_var, const float* intparams, float* likelihood)
{
int e52 = blockIdx.x;
int e53 = threadIdx.x;
int stride = blockDim.x;
const float* params_ptr = intparams + (e52 * stride + e53) * 3;
float* likelihood_ptr = likelihood + (e52 * stride + e53);
float a = params_ptr[0];
float b = params_ptr[1];
float c = params_ptr[2];
*likelihood_ptr = a + (b * b * binomial_var) / (2.0f + 2.0f * c * binomial_var)
- 0.5f * __logf(1.0f + c * binomial_var);
}
void cudadpeak_single(
const float dp52_ratio, const float bg,
const std::vector<float>& values, float* d_values,
float* h_probpeak, float* d_probpeak,
std::vector<float> probbg, float* h_probbg, float* d_probbg,
const std::vector<float>& peakgrid, float* d_peakgrid,
float* d_intparams, float* h_likelihood, float* d_likelihood)
{
const size_t num_values = values.size();
const size_t grid_size = peakgrid.size();
if (num_values == 0)
{
std::fill_n(h_likelihood, grid_size * grid_size, 0.0f);
return;
}
for (size_t i = 0; i < num_values; ++i)
h_probbg[i] = probbg[i] * bg;
hipMemcpy(d_probbg, h_probbg, sizeof(float) * num_values, hipMemcpyHostToDevice);
hipMemcpy(d_values, values.data(), sizeof(float) * num_values, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( probpeak_kernel), dim3(grid_size), dim3(num_values), 0, 0, 1.0f - bg, d_values, d_peakgrid, d_probpeak);
float inv_num_values = 1.0f / float(num_values);
float binomial_var = float(num_values) * dp52_ratio * (1.0f - dp52_ratio);
unsigned int n_reduction = 1;
while (n_reduction * 2 < num_values)
n_reduction *= 2;
dim3 dim_grid(grid_size, grid_size);
dim3 dim_block(num_values);
size_t shared_memory_size = sizeof(float) * num_values * 3;
hipLaunchKernelGGL(( prob_kernel), dim3(dim_grid), dim3(dim_block), shared_memory_size, 0,
int(num_values), n_reduction, inv_num_values, int(grid_size),
dp52_ratio, 1.0f - dp52_ratio, d_probpeak, d_probbg, d_intparams);
std::vector<float> h_intparams(10000);
hipMemcpy(h_intparams.data(), d_intparams, h_intparams.size() * 4, hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( reduce_kernel), dim3(grid_size), dim3(grid_size), 0, 0, binomial_var, d_intparams, d_likelihood);
hipMemcpy(h_likelihood, d_likelihood, sizeof(float) * grid_size * grid_size, hipMemcpyDeviceToHost);
}
std::vector<float> dpeak_batch(
const std::vector<float>& dp52_ratio,
const std::vector<float>& bg,
const std::vector<std::vector<float>>& values_batch,
const std::vector<std::vector<float>>& probbg_batch,
const std::vector<float>& peakgrid)
{
const size_t batch_size = values_batch.size();
const size_t grid_size = peakgrid.size();
std::vector<float> probpeak(grid_size * CUDA_PROBPEAK_STRIDE);
std::vector<float> probbg(CUDA_PROBPEAK_STRIDE);
std::vector<float> likelihood_batch(batch_size * grid_size * grid_size);
float* h_probpeak = probpeak.data();
float* h_probbg = probbg.data();
float* d_probpeak;
float* d_values;
float* d_probbg;
float* d_peakgrid;
float* d_intparams;
float* d_likelihood;
hipMalloc(&d_probpeak, sizeof(float) * grid_size * CUDA_PROBPEAK_STRIDE);
hipMalloc(&d_values, sizeof(float) * CUDA_PROBPEAK_STRIDE);
hipMalloc(&d_probbg, sizeof(float) * CUDA_PROBPEAK_STRIDE);
hipMalloc(&d_peakgrid, sizeof(float) * grid_size);
hipMalloc(&d_intparams, sizeof(float) * grid_size * grid_size * 3);
hipMalloc(&d_likelihood, sizeof(float) * grid_size * grid_size);
hipMemcpy(d_peakgrid, peakgrid.data(), sizeof(float) * grid_size, hipMemcpyHostToDevice);
for (size_t b = 0; b < batch_size; ++b)
{
float* h_likelihood = likelihood_batch.data() + b * grid_size * grid_size;
cudadpeak_single(dp52_ratio[b], bg[b],
values_batch[b], d_values,
h_probpeak, d_probpeak,
probbg_batch[b], h_probbg, d_probbg,
peakgrid, d_peakgrid,
d_intparams, h_likelihood, d_likelihood);
}
hipFree(d_likelihood);
hipFree(d_intparams);
hipFree(d_peakgrid);
hipFree(d_probbg);
hipFree(d_values);
hipFree(d_probpeak);
return likelihood_batch;
}
| 92259ff15429f6d27c3ee29724050cf8bf2f621c.cu | // L1000 peak deconvolution based on Bayesian analysis
//
// Copyright 2019 Tianhuan Lu
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <vector>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "dpeak.h"
// maximum number of reads per bead color
#define CUDA_PROBPEAK_STRIDE 512
__global__ void probpeak_kernel(const float bg1m, const float* values, const float* peakgrid, float* probpeak)
{
int g = blockIdx.x;
int i = threadIdx.x;
float mean = peakgrid[g];
float dev = PEAK_WIDTH_MODEL(mean);
float inv_dev = 1.0f / dev;
float prob = (values[i] - mean) * inv_dev;
probpeak[g * CUDA_PROBPEAK_STRIDE + i] = bg1m * 3.30797f * powf(3.0f + prob * prob, -2.0f) * inv_dev;
}
__global__ void prob_kernel(const unsigned int num_values, unsigned int n_reduction,
const float inv_num_values, const unsigned int grid_size,
const float dp52_ratio, const float dp53_ratio,
const float* probpeak, const float* probbg, float* intparams)
{
int e52 = blockIdx.x;
int e53 = blockIdx.y;
int r = threadIdx.x;
const float* dp52prob = probpeak + (e52 * CUDA_PROBPEAK_STRIDE);
const float* dp53prob = probpeak + (e53 * CUDA_PROBPEAK_STRIDE);
float* totalparams = intparams + (e52 * grid_size + e53) * 3;
extern __shared__ float params[];
float base = dp52_ratio * dp52prob[r] + dp53_ratio * dp53prob[r] + probbg[r];
float diff = inv_num_values * (dp52prob[r] - dp53prob[r]) / base;
params[r * 3 + 0] = __logf(base);
params[r * 3 + 1] = diff;
params[r * 3 + 2] = diff * diff;
__syncthreads();
if (r < int(num_values - n_reduction))
{
params[r * 3 + 0] += params[(r + n_reduction) * 3 + 0];
params[r * 3 + 1] += params[(r + n_reduction) * 3 + 1];
params[r * 3 + 2] += params[(r + n_reduction) * 3 + 2];
}
__syncthreads();
n_reduction >>= 1;
for (; n_reduction > 0; n_reduction >>= 1)
{
if (r < n_reduction)
{
params[r * 3 + 0] += params[(r + n_reduction) * 3 + 0];
params[r * 3 + 1] += params[(r + n_reduction) * 3 + 1];
params[r * 3 + 2] += params[(r + n_reduction) * 3 + 2];
}
__syncthreads();
}
if (r == 0)
{
totalparams[0] = params[0];
totalparams[1] = params[1];
totalparams[2] = params[2];
}
}
__global__ void reduce_kernel(const float binomial_var, const float* intparams, float* likelihood)
{
int e52 = blockIdx.x;
int e53 = threadIdx.x;
int stride = blockDim.x;
const float* params_ptr = intparams + (e52 * stride + e53) * 3;
float* likelihood_ptr = likelihood + (e52 * stride + e53);
float a = params_ptr[0];
float b = params_ptr[1];
float c = params_ptr[2];
*likelihood_ptr = a + (b * b * binomial_var) / (2.0f + 2.0f * c * binomial_var)
- 0.5f * __logf(1.0f + c * binomial_var);
}
void cudadpeak_single(
const float dp52_ratio, const float bg,
const std::vector<float>& values, float* d_values,
float* h_probpeak, float* d_probpeak,
std::vector<float> probbg, float* h_probbg, float* d_probbg,
const std::vector<float>& peakgrid, float* d_peakgrid,
float* d_intparams, float* h_likelihood, float* d_likelihood)
{
const size_t num_values = values.size();
const size_t grid_size = peakgrid.size();
if (num_values == 0)
{
std::fill_n(h_likelihood, grid_size * grid_size, 0.0f);
return;
}
for (size_t i = 0; i < num_values; ++i)
h_probbg[i] = probbg[i] * bg;
cudaMemcpy(d_probbg, h_probbg, sizeof(float) * num_values, cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values.data(), sizeof(float) * num_values, cudaMemcpyHostToDevice);
probpeak_kernel<<<grid_size, num_values>>>(1.0f - bg, d_values, d_peakgrid, d_probpeak);
float inv_num_values = 1.0f / float(num_values);
float binomial_var = float(num_values) * dp52_ratio * (1.0f - dp52_ratio);
unsigned int n_reduction = 1;
while (n_reduction * 2 < num_values)
n_reduction *= 2;
dim3 dim_grid(grid_size, grid_size);
dim3 dim_block(num_values);
size_t shared_memory_size = sizeof(float) * num_values * 3;
prob_kernel<<<dim_grid, dim_block, shared_memory_size>>>(
int(num_values), n_reduction, inv_num_values, int(grid_size),
dp52_ratio, 1.0f - dp52_ratio, d_probpeak, d_probbg, d_intparams);
std::vector<float> h_intparams(10000);
cudaMemcpy(h_intparams.data(), d_intparams, h_intparams.size() * 4, cudaMemcpyDeviceToHost);
reduce_kernel<<<grid_size, grid_size>>>(binomial_var, d_intparams, d_likelihood);
cudaMemcpy(h_likelihood, d_likelihood, sizeof(float) * grid_size * grid_size, cudaMemcpyDeviceToHost);
}
std::vector<float> dpeak_batch(
const std::vector<float>& dp52_ratio,
const std::vector<float>& bg,
const std::vector<std::vector<float>>& values_batch,
const std::vector<std::vector<float>>& probbg_batch,
const std::vector<float>& peakgrid)
{
const size_t batch_size = values_batch.size();
const size_t grid_size = peakgrid.size();
std::vector<float> probpeak(grid_size * CUDA_PROBPEAK_STRIDE);
std::vector<float> probbg(CUDA_PROBPEAK_STRIDE);
std::vector<float> likelihood_batch(batch_size * grid_size * grid_size);
float* h_probpeak = probpeak.data();
float* h_probbg = probbg.data();
float* d_probpeak;
float* d_values;
float* d_probbg;
float* d_peakgrid;
float* d_intparams;
float* d_likelihood;
cudaMalloc(&d_probpeak, sizeof(float) * grid_size * CUDA_PROBPEAK_STRIDE);
cudaMalloc(&d_values, sizeof(float) * CUDA_PROBPEAK_STRIDE);
cudaMalloc(&d_probbg, sizeof(float) * CUDA_PROBPEAK_STRIDE);
cudaMalloc(&d_peakgrid, sizeof(float) * grid_size);
cudaMalloc(&d_intparams, sizeof(float) * grid_size * grid_size * 3);
cudaMalloc(&d_likelihood, sizeof(float) * grid_size * grid_size);
cudaMemcpy(d_peakgrid, peakgrid.data(), sizeof(float) * grid_size, cudaMemcpyHostToDevice);
for (size_t b = 0; b < batch_size; ++b)
{
float* h_likelihood = likelihood_batch.data() + b * grid_size * grid_size;
cudadpeak_single(dp52_ratio[b], bg[b],
values_batch[b], d_values,
h_probpeak, d_probpeak,
probbg_batch[b], h_probbg, d_probbg,
peakgrid, d_peakgrid,
d_intparams, h_likelihood, d_likelihood);
}
cudaFree(d_likelihood);
cudaFree(d_intparams);
cudaFree(d_peakgrid);
cudaFree(d_probbg);
cudaFree(d_values);
cudaFree(d_probpeak);
return likelihood_batch;
}
|
6d03773d8339d170b74c7fac9d2b606e1bd7e20c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include<vector>
#include<utility>
#include<algorithm>
#include <cstdio>
#include <fstream>
#include <math.h>
#include "kernels/EdgeCentric_phase1.h"
#include "kernels/EdgeCentric_phase2.h"
using namespace std;
#define BLOCK_DIM 256
float df = 0.85f;
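// Edge-centric (COO) graph representation: edge i runs from edgeArray1[i] to
// edgeArray2[i]; outdegree counts outgoing edges per node and sinkArray lists
// the dangling nodes (outdegree == 0).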
struct Graph{
int N; // number of nodes
int countSink; //number of sink nodes
int E;
int *outdegree; // contains outdegree of all nodes
int *indegree; //contains indegree of all nodes
int *sinkArray;
int *edgeArray1;
int *edgeArray2;
float *pr; // page rank values
};
Graph* buildGraph(vector<pair<int,int>>& edges, int E,int V)
{
Graph* G = new Graph();
G->N = V;
G->E = E;
G-> outdegree = new int[V]();
G->edgeArray1 = new int[E];
G->edgeArray2 = new int[E];
G->pr = new float[V];
for(int i=0;i<E;i++){
G->outdegree[edges[i].first]++;
G->edgeArray1[i] = edges[i].first;
G->edgeArray2[i] = edges[i].second;
}
G->countSink = 0;
for(int i=0;i<V;i++){
if (G->outdegree[i] == 0){
G->countSink ++;
}
}
G->sinkArray = new int[G->countSink]();
int x = 0;
for( int i = 0; i < V; i++){
if (G->outdegree[i] == 0){
G->sinkArray[x] = i;
x+= 1;
}
}
return G;
}
Graph* readgraph(const char* file){
FILE *in_file = fopen(file, "r");
int E,V = 0;
fscanf(in_file, "%d %d", &E,&V);
vector<pair<int,int>> edges(E);
for (auto& e:edges) {
fscanf(in_file, "%d %d", &e.first, &e.second);
}
fclose(in_file);
return buildGraph(edges, E, V);
}
// Stores the page rank values of the given Graph structure in the given output file
void storePageRank(Graph* graph,const char* file)
{
FILE *out_file = fopen(file, "w");
for (int i=0; i<graph->N; i++) {
fprintf(out_file, "%f\n", graph->pr[i]);
}
fclose(out_file);
}
// Initialises the page rank values of the Graph structure to 1
void initialisePageRank(Graph *graph)
{
for (int i=0; i<graph->N; i++) {
graph->pr[i] = 1.0f;
}
}
void PageRank_edge_centric(Graph* G,int iter,float df,int blocksPerGrid,int threadsPerBlock){
hipError_t err = hipSuccess;
cout<<"Initialize arrays in device memory\n";
int* d_outdegreeArray = NULL;
err = hipMalloc((int **)&d_outdegreeArray, (G->N)*sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector outdegreeArray (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device edgeArray1
int* d_edgeArray1 = NULL;
err = hipMalloc((int **)&d_edgeArray1, G->E*sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector edgeArray1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device edgeArray2
int* d_edgeArray2 = NULL;
err = hipMalloc((int **)&d_edgeArray2, G->E*sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector edgeArray2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device sinkArray
int* d_sinkArray = NULL;
err = hipMalloc((int **)&d_sinkArray, G->countSink * sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector sinkArray (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device oldpr
float* d_oldpr = NULL;
err = hipMalloc((float **)&d_oldpr, G->N*sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector oldpr (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device newpr output array
float* d_newpr = NULL;
err = hipMalloc((float **)&d_newpr, G->N*sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector newpr (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//copy input data from host to device
cout<<"Copy input data from host memory to CUDA device memory\n";
err = hipMemcpy(d_outdegreeArray, G->outdegree, (G->N)*sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector outdegree from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_edgeArray1, G->edgeArray1, G->E*sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector edgeArray1 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_edgeArray2, G->edgeArray2, G->E*sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector edgeArray2 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_sinkArray, G->sinkArray, G->countSink * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector sinkArray from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_oldpr, G->pr, G->N*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector pr from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// grid and block dimension
dim3 grid(blocksPerGrid,1,1);
dim3 block(threadsPerBlock,1,1);
while(iter--){
// Launch the PageRank Update CUDA Kernel
hipLaunchKernelGGL(( EdgeCentric_phase1), dim3(grid), dim3(block), 0, 0, d_newpr,d_oldpr,d_sinkArray, G->N,G->countSink,df);
hipLaunchKernelGGL(( EdgeCentric_phase2), dim3(grid), dim3(block), 0, 0, d_outdegreeArray, d_edgeArray1, d_edgeArray2,
d_oldpr, d_newpr, G->E, df);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch UpdatePageRank kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//update the oldpr array
err = hipMemcpy(d_oldpr,d_newpr,G->N*sizeof(float),hipMemcpyDeviceToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy newpr array from device to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
err = hipMemcpy(G->pr,d_oldpr,G->N*sizeof(float),hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy pr array from device to Host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//free up allocated memory
err = hipFree(d_edgeArray1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector edgeArray1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_edgeArray2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector edgeArray2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_oldpr);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector oldpr (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_newpr);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector newpr (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_outdegreeArray);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector outdegreeArray (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_sinkArray);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector sinkArray (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Reset the device
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int main(){
char input_file[] = "./Dataset/amazon.txt";
char output_file[] = "edgeCentric_output.txt";
Graph* G = readgraph(input_file);
initialisePageRank(G);
int threadsPerBlock = BLOCK_DIM;
int blocksPerGrid = (G->N+threadsPerBlock-1)/threadsPerBlock;
PageRank_edge_centric(G,10000,df,blocksPerGrid,threadsPerBlock);
cout<<"PageRank calculation done!!"<<endl;
storePageRank(G,output_file);
return 0;
}
| 6d03773d8339d170b74c7fac9d2b606e1bd7e20c.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include<vector>
#include<utility>
#include<algorithm>
#include <cstdio>
#include <fstream>
#include <math.h>
#include "kernels/EdgeCentric_phase1.h"
#include "kernels/EdgeCentric_phase2.h"
using namespace std;
#define BLOCK_DIM 256
float df = 0.85f;
struct Graph{
int N; // number of nodes
int countSink; //number of sink nodes
int E;
int *outdegree; // contains outdegree of all nodes
int *indegree; //contains indegree of all nodes
int *sinkArray;
int *edgeArray1;
int *edgeArray2;
float *pr; // page rank values
};
Graph* buildGraph(vector<pair<int,int>>& edges, int E,int V)
{
Graph* G = new Graph();
G->N = V;
G->E = E;
G-> outdegree = new int[V]();
G->edgeArray1 = new int[E];
G->edgeArray2 = new int[E];
G->pr = new float[V];
for(int i=0;i<E;i++){
G->outdegree[edges[i].first]++;
G->edgeArray1[i] = edges[i].first;
G->edgeArray2[i] = edges[i].second;
}
G->countSink = 0;
for(int i=0;i<V;i++){
if (G->outdegree[i] == 0){
G->countSink ++;
}
}
G->sinkArray = new int[G->countSink]();
int x = 0;
for( int i = 0; i < V; i++){
if (G->outdegree[i] == 0){
G->sinkArray[x] = i;
x+= 1;
}
}
return G;
}
Graph* readgraph(const char* file){
FILE *in_file = fopen(file, "r");
int E,V = 0;
fscanf(in_file, "%d %d", &E,&V);
vector<pair<int,int>> edges(E);
for (auto& e:edges) {
fscanf(in_file, "%d %d", &e.first, &e.second);
}
fclose(in_file);
return buildGraph(edges, E, V);
}
// Stores the page rank values of the given Graph structure in the given output file
void storePageRank(Graph* graph,const char* file)
{
FILE *out_file = fopen(file, "w");
for (int i=0; i<graph->N; i++) {
fprintf(out_file, "%f\n", graph->pr[i]);
}
fclose(out_file);
}
// Initialises the page rank values of the Graph structure to 1
void initialisePageRank(Graph *graph)
{
for (int i=0; i<graph->N; i++) {
graph->pr[i] = 1.0f;
}
}
void PageRank_edge_centric(Graph* G,int iter,float df,int blocksPerGrid,int threadsPerBlock){
cudaError_t err = cudaSuccess;
cout<<"Initialize arrays in device memory\n";
int* d_outdegreeArray = NULL;
err = cudaMalloc((int **)&d_outdegreeArray, (G->N)*sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector outdegreeArray (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device edgeArray1
int* d_edgeArray1 = NULL;
err = cudaMalloc((int **)&d_edgeArray1, G->E*sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector edgeArray1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device edgeArray2
int* d_edgeArray2 = NULL;
err = cudaMalloc((int **)&d_edgeArray2, G->E*sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector edgeArray2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device sinkArray
int* d_sinkArray = NULL;
err = cudaMalloc((int **)&d_sinkArray, G->countSink * sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector sinkArray (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device oldpr
float* d_oldpr = NULL;
err = cudaMalloc((float **)&d_oldpr, G->N*sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector oldpr (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate the device newpr output array
float* d_newpr = NULL;
err = cudaMalloc((float **)&d_newpr, G->N*sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector newpr (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//copy input data from host to device
cout<<"Copy input data from host memory to CUDA device memory\n";
err = cudaMemcpy(d_outdegreeArray, G->outdegree, (G->N)*sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector outdegree from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_edgeArray1, G->edgeArray1, G->E*sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector edgeArray1 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_edgeArray2, G->edgeArray2, G->E*sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector edgeArray2 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_sinkArray, G->sinkArray, G->countSink * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector sinkArray from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_oldpr, G->pr, G->N*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector pr from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// grid and block dimension
dim3 grid(blocksPerGrid,1,1);
dim3 block(threadsPerBlock,1,1);
while(iter--){
// Launch the PageRank Update CUDA Kernel
EdgeCentric_phase1<<<grid, block>>>(d_newpr,d_oldpr,d_sinkArray, G->N,G->countSink,df);
EdgeCentric_phase2<<<grid, block>>>(d_outdegreeArray, d_edgeArray1, d_edgeArray2,
d_oldpr, d_newpr, G->E, df);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch UpdatePageRank kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//update the oldpr array
err = cudaMemcpy(d_oldpr,d_newpr,G->N*sizeof(float),cudaMemcpyDeviceToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy newpr array from device to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
err = cudaMemcpy(G->pr,d_oldpr,G->N*sizeof(float),cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy pr array from device to Host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//free up allocated memory
err = cudaFree(d_edgeArray1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector edgeArray1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_edgeArray2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector edgeArray2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_oldpr);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector oldpr (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_newpr);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector newpr (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_outdegreeArray);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector outdegreeArray (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_sinkArray);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector sinkArray (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Reset the device
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int main(){
char input_file[] = "./Dataset/amazon.txt";
char output_file[] = "edgeCentric_output.txt";
Graph* G = readgraph(input_file);
initialisePageRank(G);
int threadsPerBlock = BLOCK_DIM;
int blocksPerGrid = (G->N+threadsPerBlock-1)/threadsPerBlock;
PageRank_edge_centric(G,10000,df,blocksPerGrid,threadsPerBlock);
cout<<"PageRank calculation done!!"<<endl;
storePageRank(G,output_file);
return 0;
}
|
bf5751e8b92522fd78c17f6d0687d8ab6c507e8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
const int INF = ((1 << 30) - 1);
const int V = 20000;
void input(char* inFileName);
void output(char* outFileName);
int ceil(int a, int b);
__global__ void phase1(int round, int n, int V, int* Dist, int B);
__global__ void phase2(int round, int n, int V, int* Dist, int B);
__global__ void phase3(int round, int n, int V, int* Dist, int B);
extern __shared__ int S[];
int n, m;
int *d_Dist, **d_Dist_internal;
int *d_n, *d_m;
int Dist[V][V];
int main(int argc, char* argv[]) {
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord(start);
input(argv[1]);
// for (int i = 0; i < n; ++i) {
// for (int j = 0; j < n; ++j)
// printf("%d\n", Dist[i][j]);
// }
hipMalloc((void **)&d_Dist, V * V * sizeof(int));
hipMalloc((void **)&d_n, sizeof(int));
hipMalloc((void **)&d_m, sizeof(int));
hipMemcpy(d_Dist, Dist, V * V * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_m, &m, sizeof(int), hipMemcpyHostToDevice);
int B = 32;
int round = ceil(n, B);
dim3 grid1(1, 1);
dim3 grid2(round, 2);
dim3 grid3(round, round);
dim3 blk(B, B);
int num_threads = 32;
for (int r = 0; r < round; ++r) {
hipLaunchKernelGGL(( phase1), dim3(grid1), dim3(blk), B*B*sizeof(int), 0, r, n, V, d_Dist, B);
hipLaunchKernelGGL(( phase2), dim3(grid2), dim3(blk), 2*B*B*sizeof(int), 0, r, n, V, d_Dist, B);
hipLaunchKernelGGL(( phase3), dim3(grid3), dim3(blk), 2*B*B*sizeof(int), 0, r, n, V, d_Dist, B);
}
// hipEventRecord(stop);
// hipEventSynchronize(stop);
// hipDeviceSynchronize();
hipMemcpy(Dist, d_Dist, V * V * sizeof(int), hipMemcpyDeviceToHost);
output(argv[2]);
// for (int i = 0; i < n; ++i) {
// for (int j = 0; j < n; ++j)
// printf("%d\n", Dist[i][j]);
// }
// float time;
// hipEventElapsedTime(&time, start, stop);
// printf("total time: %f\n", time);
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) {
Dist[i][j] = 0;
} else {
Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]][pair[1]] = pair[2];
}
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF) Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b) { return (a + b - 1) / b; }
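// Blocked Floyd-Warshall: in round r, phase1 relaxes the pivot block (r,r) in
// shared memory, phase2 relaxes the blocks in the pivot block-row and pivot
// block-column (grid2's y-dimension of 2 selects row vs. column), and phase3
// relaxes all remaining blocks using tiles from the pivot block-row and
// block-column cached in shared memory.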
__global__ void phase1(int round, int n, int V, int* Dist, int B){
int s_i = threadIdx.y;
int s_j = threadIdx.x;
int i = round * B + s_i;
int j = round * B + s_j;
if((i < n && j < n))
S[s_i * B + s_j] = Dist[i * V + j];
__syncthreads();
int tt = round * B;
int ss = s_i * B;
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S[ss + k] + S[k * B + s_j] < S[ss + s_j])
S[ss + s_j] = S[ss + k] + S[k * B + s_j];
__syncthreads();
}
if (i < n && j < n) Dist[i * V + j] = S[ss + s_j];
__syncthreads();
}
__global__ void phase2(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round) return;
int* S_pivot = &S[0];
int* S_dist = &S[B * B];
int s_i = threadIdx.y;
int s_j = threadIdx.x;
int i = round * B + s_i;
int j = round * B + s_j;
int ss = s_i * B;
if((i < n && j < n))
S_pivot[ss + s_j] = Dist[i * V + j];
__syncthreads();
if (blockIdx.y == 0)
j = blockIdx.x * B + s_j;
else
i = blockIdx.x * B + s_i;
if (i >= n || j >= n) return;
if((i < n && j < n))
S_dist[ss + s_j] = Dist[i * V + j];
__syncthreads();
int tt = round * B;
if(blockIdx.y == 1){
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S_dist[ss + k] + S_pivot[k * B + s_j] < S_dist[ss + s_j])
S_dist[ss + s_j] = S_dist[ss + k] + S_pivot[k * B + s_j];
}
}else{
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S_pivot[ss + k] + S_dist[k * B + s_j] < S_dist[ss + s_j])
S_dist[ss + s_j] = S_pivot[ss + k] + S_dist[k * B + s_j];
}
}
if (i < n && j < n) Dist[i * V + j] = S_dist[ss + s_j];
__syncthreads();
}
__global__ void phase3(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round || blockIdx.y == round) return;
int* S_pivot_row = &S[0];
int* S_pivot_col= &S[B * B];
int s_i = threadIdx.y;
int s_j = threadIdx.x;
int i = blockIdx.y * B + s_i;
int j = blockIdx.x * B + s_j;
int b_i = round * B + s_i;
int b_j = round * B + s_j;
int ss = s_i * B;
if(i < n && b_j < n) S_pivot_row[ss + s_j] = Dist[i * V + b_j];
if(j < n && b_i < n) S_pivot_col[ss + s_j] = Dist[b_i * V + j];
__syncthreads();
if (i >= n || j >= n) return;
int dst = Dist[i * V + j];
int tt = round * B;
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S_pivot_row[ss + k] + S_pivot_col[k * B + s_j] < dst)
dst = S_pivot_row[ss + k] + S_pivot_col[k * B + s_j];
}
Dist[i * V + j] = dst;
}
| bf5751e8b92522fd78c17f6d0687d8ab6c507e8c.cu | #include <stdio.h>
#include <stdlib.h>
const int INF = ((1 << 30) - 1);
const int V = 20000;
void input(char* inFileName);
void output(char* outFileName);
int ceil(int a, int b);
__global__ void phase1(int round, int n, int V, int* Dist, int B);
__global__ void phase2(int round, int n, int V, int* Dist, int B);
__global__ void phase3(int round, int n, int V, int* Dist, int B);
extern __shared__ int S[];
int n, m;
int *d_Dist, **d_Dist_internal;
int *d_n, *d_m;
int Dist[V][V];
int main(int argc, char* argv[]) {
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start);
input(argv[1]);
// for (int i = 0; i < n; ++i) {
// for (int j = 0; j < n; ++j)
// printf("%d\n", Dist[i][j]);
// }
cudaMalloc((void **)&d_Dist, V * V * sizeof(int));
cudaMalloc((void **)&d_n, sizeof(int));
cudaMalloc((void **)&d_m, sizeof(int));
cudaMemcpy(d_Dist, Dist, V * V * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_m, &m, sizeof(int), cudaMemcpyHostToDevice);
int B = 32;
int round = ceil(n, B);
dim3 grid1(1, 1);
dim3 grid2(round, 2);
dim3 grid3(round, round);
dim3 blk(B, B);
int num_threads = 32;
for (int r = 0; r < round; ++r) {
phase1<<<grid1, blk, B*B*sizeof(int)>>>(r, n, V, d_Dist, B);
phase2<<<grid2, blk, 2*B*B*sizeof(int)>>>(r, n, V, d_Dist, B);
phase3<<<grid3, blk, 2*B*B*sizeof(int)>>>(r, n, V, d_Dist, B);
}
// cudaEventRecord(stop);
// cudaEventSynchronize(stop);
// cudaThreadSynchronize();
cudaMemcpy(Dist, d_Dist, V * V * sizeof(int), cudaMemcpyDeviceToHost);
output(argv[2]);
// for (int i = 0; i < n; ++i) {
// for (int j = 0; j < n; ++j)
// printf("%d\n", Dist[i][j]);
// }
// float time;
// cudaEventElapsedTime(&time, start, stop);
// printf("total time: %f\n", time);
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) {
Dist[i][j] = 0;
} else {
Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]][pair[1]] = pair[2];
}
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF) Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b) { return (a + b - 1) / b; }
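// Phase 1 of the blocked Floyd-Warshall: the pivot block (round, round) is loaded into shared memory and relaxed against itself.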
__global__ void phase1(int round, int n, int V, int* Dist, int B){
int s_i = threadIdx.y;
int s_j = threadIdx.x;
int i = round * B + s_i;
int j = round * B + s_j;
if((i < n && j < n))
S[s_i * B + s_j] = Dist[i * V + j];
__syncthreads();
int tt = round * B;
int ss = s_i * B;
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S[ss + k] + S[k * B + s_j] < S[ss + s_j])
S[ss + s_j] = S[ss + k] + S[k * B + s_j];
__syncthreads();
}
if (i < n && j < n) Dist[i * V + j] = S[ss + s_j];
__syncthreads();
}
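// Phase 2: blocks in the pivot row (blockIdx.y == 0) and pivot column (blockIdx.y == 1) are relaxed against the pivot block cached in shared memory.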
__global__ void phase2(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round) return;
int* S_pivot = &S[0];
int* S_dist = &S[B * B];
int s_i = threadIdx.y;
int s_j = threadIdx.x;
int i = round * B + s_i;
int j = round * B + s_j;
int ss = s_i * B;
if((i < n && j < n))
S_pivot[ss + s_j] = Dist[i * V + j];
__syncthreads();
if (blockIdx.y == 0)
j = blockIdx.x * B + s_j;
else
i = blockIdx.x * B + s_i;
if (i >= n || j >= n) return;
if((i < n && j < n))
S_dist[ss + s_j] = Dist[i * V + j];
__syncthreads();
int tt = round * B;
if(blockIdx.y == 1){
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S_dist[ss + k] + S_pivot[k * B + s_j] < S_dist[ss + s_j])
S_dist[ss + s_j] = S_dist[ss + k] + S_pivot[k * B + s_j];
}
}else{
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S_pivot[ss + k] + S_dist[k * B + s_j] < S_dist[ss + s_j])
S_dist[ss + s_j] = S_pivot[ss + k] + S_dist[k * B + s_j];
}
}
if (i < n && j < n) Dist[i * V + j] = S_dist[ss + s_j];
__syncthreads();
}
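// Phase 3: every remaining block is relaxed via Dist[i][k] + Dist[k][j], using the cached pivot-column and pivot-row tiles; blocks in the pivot row or column are skipped.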
__global__ void phase3(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round || blockIdx.y == round) return;
int* S_pivot_row = &S[0];
int* S_pivot_col= &S[B * B];
int s_i = threadIdx.y;
int s_j = threadIdx.x;
int i = blockIdx.y * B + s_i;
int j = blockIdx.x * B + s_j;
int b_i = round * B + s_i;
int b_j = round * B + s_j;
int ss = s_i * B;
if(i < n && b_j < n) S_pivot_row[ss + s_j] = Dist[i * V + b_j];
if(j < n && b_i < n) S_pivot_col[ss + s_j] = Dist[b_i * V + j];
__syncthreads();
if (i >= n || j >= n) return;
int dst = Dist[i * V + j];
int tt = round * B;
#pragma unroll
for (int k = 0; k < B && tt + k < n; ++k) {
if (S_pivot_row[ss + k] + S_pivot_col[k * B + s_j] < dst)
dst = S_pivot_row[ss + k] + S_pivot_col[k * B + s_j];
}
Dist[i * V + j] = dst;
}
|
3a63038091d8ea15f8e60feb8e7bbb400c061dcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define MAX_ITER 128
#include <iostream>
#include <vector>
__global__ void ChildKernel(const int2 res, const int2 roi, const int2 offset,
const float dmin, const float* const d_in,
float* const d_out) {
const int x = offset.x + blockIdx.x * blockDim.x + threadIdx.x;
const int y = offset.y + blockIdx.y * blockDim.y + threadIdx.y;
d_out[x * res.y + y] += d_in[x * res.y + y] - dmin;
}
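// Each parent thread owns one roi-sized tile: it takes the tile's minimum, overwrites the tile with it, then launches ChildKernel (dynamic parallelism) to add d_in - dmin back, leaving d_out equal to d_in.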
__global__ void ParentKernel(const int2 res, const int2 roi,
const float* const d_in, float* const d_out) {
const int x0 = roi.x * (blockIdx.x * blockDim.x + threadIdx.x);
const int y0 = roi.y * (blockIdx.y * blockDim.y + threadIdx.y);
const int x1 = min(x0 + roi.x, res.x);
const int y1 = min(y0 + roi.y, res.y);
// (technically) iterate a few times and find min distance,
// but here simulate with min over grid
float dmin{10000.0F};
for (int x = x0; x < x1; ++x) {
for (int y = y0; y < y1; ++y) {
dmin = min(dmin, d_out[x * res.y + y]);
}
}
// Just for fun, populate d_out with the current dmin and allow the child kernel to
// complete the rest.
for (int x = x0; x < x1; ++x) {
for (int y = y0; y < y1; ++y) {
d_out[x * res.y + y] = dmin;
}
}
// Launch a child kernel on each partition, starting from the computed dmin.
const dim3 block_dims = {2, 2}; // ==roi
const dim3 grid_dims = {1, 1}; // I guess always one, for us?
hipLaunchKernelGGL(( ChildKernel), dim3(grid_dims), dim3(block_dims), 0, 0, res, roi, int2{x0, y0}, dmin, d_in,
d_out);
}
int main() {
// Configure
const int2 resolution = {32, 32};
const int2 roi = {2, 2};
const dim3 block_dims(resolution.x / roi.x, resolution.y / roi.y);
const dim3 grid_dims(1, 1);
const int num_bytes = resolution.x * resolution.y * sizeof(float);
// Fill Host Vector
std::vector<float> d_in_h(resolution.x * resolution.y);
for (int i = 0; i < resolution.x * resolution.y; ++i) {
d_in_h[i] = i;
}
std::vector<float> d_out_h(resolution.x * resolution.y);
// Allocate device vector
float *d_in, *d_out;
hipMalloc((void**)&d_in, num_bytes);
hipMalloc((void**)&d_out, num_bytes);
// Copy + Launch
hipMemcpy(d_in, d_in_h.data(), num_bytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ParentKernel), dim3(block_dims), dim3(grid_dims), 0, 0, resolution, roi, d_in, d_out);
hipMemcpy(d_out_h.data(), d_out, num_bytes, hipMemcpyDeviceToHost);
// Free
hipFree(d_in);
hipFree(d_out);
// Visualize
for (int i = 0; i < resolution.x; ++i) {
for (int j = 0; j < resolution.y; ++j) {
std::cout << (d_in_h[i * resolution.y + j] ==
d_out_h[i * resolution.y + j])
<< ' ';
}
std::cout << std::endl;
}
}
| 3a63038091d8ea15f8e60feb8e7bbb400c061dcf.cu |
#define MAX_ITER 128
#include <iostream>
#include <vector>
__global__ void ChildKernel(const int2 res, const int2 roi, const int2 offset,
const float dmin, const float* const d_in,
float* const d_out) {
const int x = offset.x + blockIdx.x * blockDim.x + threadIdx.x;
const int y = offset.y + blockIdx.y * blockDim.y + threadIdx.y;
d_out[x * res.y + y] += d_in[x * res.y + y] - dmin;
}
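// Each parent thread owns one roi-sized tile: it takes the tile's minimum, overwrites the tile with it, then launches ChildKernel (dynamic parallelism) to add d_in - dmin back, leaving d_out equal to d_in.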
__global__ void ParentKernel(const int2 res, const int2 roi,
const float* const d_in, float* const d_out) {
const int x0 = roi.x * (blockIdx.x * blockDim.x + threadIdx.x);
const int y0 = roi.y * (blockIdx.y * blockDim.y + threadIdx.y);
const int x1 = min(x0 + roi.x, res.x);
const int y1 = min(y0 + roi.y, res.y);
// (technically) iterate a few times and find min distance,
// but here simulate with min over grid
float dmin{10000.0F};
for (int x = x0; x < x1; ++x) {
for (int y = y0; y < y1; ++y) {
dmin = min(dmin, d_out[x * res.y + y]);
}
}
// Just for fun, populate d_out with the current dmin and allow the child kernel to
// complete the rest.
for (int x = x0; x < x1; ++x) {
for (int y = y0; y < y1; ++y) {
d_out[x * res.y + y] = dmin;
}
}
// Launch a child kernel on each partition, starting from the computed dmin.
const dim3 block_dims = {2, 2}; // ==roi
const dim3 grid_dims = {1, 1}; // I guess always one, for us?
ChildKernel<<<grid_dims, block_dims>>>(res, roi, int2{x0, y0}, dmin, d_in,
d_out);
}
int main() {
// Configure
const int2 resolution = {32, 32};
const int2 roi = {2, 2};
const dim3 block_dims(resolution.x / roi.x, resolution.y / roi.y);
const dim3 grid_dims(1, 1);
const int num_bytes = resolution.x * resolution.y * sizeof(float);
// Fill Host Vector
std::vector<float> d_in_h(resolution.x * resolution.y);
for (int i = 0; i < resolution.x * resolution.y; ++i) {
d_in_h[i] = i;
}
std::vector<float> d_out_h(resolution.x * resolution.y);
// Allocate device vector
float *d_in, *d_out;
cudaMalloc((void**)&d_in, num_bytes);
cudaMalloc((void**)&d_out, num_bytes);
// Copy + Launch
cudaMemcpy(d_in, d_in_h.data(), num_bytes, cudaMemcpyHostToDevice);
ParentKernel<<<block_dims, grid_dims>>>(resolution, roi, d_in, d_out);
cudaMemcpy(d_out_h.data(), d_out, num_bytes, cudaMemcpyDeviceToHost);
// Free
cudaFree(d_in);
cudaFree(d_out);
// Visualize
for (int i = 0; i < resolution.x; ++i) {
for (int j = 0; j < resolution.y; ++j) {
std::cout << (d_in_h[i * resolution.y + j] ==
d_out_h[i * resolution.y + j])
<< ' ';
}
std::cout << std::endl;
}
}
|
37be054e772b758f8eeedc3176dce00bd0b93a56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride) {
if (d[tid] > clip) d[tid] = clip;
if (d[tid] < -clip) d[tid] = -clip;
m[tid] += d[tid] * d[tid];
x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001);
d[tid] = 0;
}
} | 37be054e772b758f8eeedc3176dce00bd0b93a56.cu | #include "includes.h"
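// Grid-stride AdaGrad update: clip each gradient to [-clip, clip], accumulate its square into m, step x by lr * d / sqrt(m + 1e-8), then zero the gradient.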
__global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride) {
if (d[tid] > clip) d[tid] = clip;
if (d[tid] < -clip) d[tid] = -clip;
m[tid] += d[tid] * d[tid];
x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001);
d[tid] = 0;
}
} |
10f2883e530920dd6b6429208dafb34aa5fb038a.hip | // !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>
#include "gemv/args.hpp"
#include "init/init.hpp"
#include "utils/utils.hpp"
/* y = alpha * Ax + beta * y
hipblasStatus_t hipblasSgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy) */
static void CUBLAS_GEMV(benchmark::State &state) {
const auto M_GLOBAL = state.range(0);
const auto N_GLOBAL = state.range(1);
const float alpha = 1.1f;
const float beta = 1.2f;
float *a_fp32;
float *x_fp32;
float *y_fp32;
PRINT_IF_ERROR(hipMalloc((void **) &a_fp32, M_GLOBAL * N_GLOBAL * sizeof(float)));
PRINT_IF_ERROR(hipMalloc((void **) &x_fp32, N_GLOBAL * sizeof(float)));
PRINT_IF_ERROR(hipMalloc((void **) &y_fp32, M_GLOBAL * sizeof(float)));
hiprandGenerator_t gen;
PRINT_IF_ERROR(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
PRINT_IF_ERROR(hiprandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
PRINT_IF_ERROR(hiprandGenerateUniform(gen, a_fp32, M_GLOBAL * N_GLOBAL));
PRINT_IF_ERROR(hiprandGenerateUniform(gen, x_fp32, N_GLOBAL));
PRINT_IF_ERROR(hiprandGenerateUniform(gen, y_fp32, M_GLOBAL));
PRINT_IF_ERROR(hiprandDestroyGenerator(gen));
hipEvent_t start, stop;
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
hipblasHandle_t cublasHandle;
PRINT_IF_ERROR(hipblasCreate(&cublasHandle));
// Do not use tensor cores
PRINT_IF_ERROR(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
const int incx = 1;
const int incy = 1;
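// Each benchmark iteration times a single SGEMV call with events and reports the elapsed time as manual iteration time.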
for (auto _ : state) {
PRINT_IF_ERROR(hipEventRecord(start));
PRINT_IF_ERROR(hipblasSgemv(cublasHandle, HIPBLAS_OP_N, M_GLOBAL, N_GLOBAL, &alpha,
a_fp32, M_GLOBAL, x_fp32, incx, &beta, y_fp32, incy));
PRINT_IF_ERROR(hipEventRecord(stop));
PRINT_IF_ERROR(hipEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
hipEventDestroy(start);
hipEventDestroy(stop);
PRINT_IF_ERROR(hipFree(a_fp32));
PRINT_IF_ERROR(hipFree(x_fp32));
PRINT_IF_ERROR(hipFree(y_fp32));
hipDeviceReset();
state.counters.insert({{"M", M_GLOBAL},
{"N", N_GLOBAL},
{"num_elements", M_GLOBAL * N_GLOBAL},
{"flops",
{state.iterations() * 2.0 * M_GLOBAL * N_GLOBAL,
benchmark::Counter::kAvgThreadsRate}}});
}
BENCHMARK(CUBLAS_GEMV)->ARGS()->UseManualTime();
| 10f2883e530920dd6b6429208dafb34aa5fb038a.cu |
#include <benchmark/benchmark.h>
#include "gemv/args.hpp"
#include "init/init.hpp"
#include "utils/utils.hpp"
/* y = alpha * Ax + beta * y
cublasStatus_t cublasSgemv(cublasHandle_t handle, cublasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy) */
static void CUBLAS_GEMV(benchmark::State &state) {
const auto M_GLOBAL = state.range(0);
const auto N_GLOBAL = state.range(1);
const float alpha = 1.1f;
const float beta = 1.2f;
float *a_fp32;
float *x_fp32;
float *y_fp32;
PRINT_IF_ERROR(cudaMalloc((void **) &a_fp32, M_GLOBAL * N_GLOBAL * sizeof(float)));
PRINT_IF_ERROR(cudaMalloc((void **) &x_fp32, N_GLOBAL * sizeof(float)));
PRINT_IF_ERROR(cudaMalloc((void **) &y_fp32, M_GLOBAL * sizeof(float)));
curandGenerator_t gen;
PRINT_IF_ERROR(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
PRINT_IF_ERROR(curandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
PRINT_IF_ERROR(curandGenerateUniform(gen, a_fp32, M_GLOBAL * N_GLOBAL));
PRINT_IF_ERROR(curandGenerateUniform(gen, x_fp32, N_GLOBAL));
PRINT_IF_ERROR(curandGenerateUniform(gen, y_fp32, M_GLOBAL));
PRINT_IF_ERROR(curandDestroyGenerator(gen));
cudaEvent_t start, stop;
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
cublasHandle_t cublasHandle;
PRINT_IF_ERROR(cublasCreate(&cublasHandle));
// Do not use tensor cores
PRINT_IF_ERROR(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
const int incx = 1;
const int incy = 1;
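// Each benchmark iteration times a single SGEMV call with events and reports the elapsed time as manual iteration time.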
for (auto _ : state) {
PRINT_IF_ERROR(cudaEventRecord(start));
PRINT_IF_ERROR(cublasSgemv(cublasHandle, CUBLAS_OP_N, M_GLOBAL, N_GLOBAL, &alpha,
a_fp32, M_GLOBAL, x_fp32, incx, &beta, y_fp32, incy));
PRINT_IF_ERROR(cudaEventRecord(stop));
PRINT_IF_ERROR(cudaEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
PRINT_IF_ERROR(cudaFree(a_fp32));
PRINT_IF_ERROR(cudaFree(x_fp32));
PRINT_IF_ERROR(cudaFree(y_fp32));
cudaDeviceReset();
state.counters.insert({{"M", M_GLOBAL},
{"N", N_GLOBAL},
{"num_elements", M_GLOBAL * N_GLOBAL},
{"flops",
{state.iterations() * 2.0 * M_GLOBAL * N_GLOBAL,
benchmark::Counter::kAvgThreadsRate}}});
}
BENCHMARK(CUBLAS_GEMV)->ARGS()->UseManualTime();
|
df43e82fe53dfa56032baac6379904f177689d93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "traffic.h"
static const int kNumBlockSize = 256;
static const char kCellTypeNormal = 1;
static const char kCellTypeProducer = 2;
using IndexT = int;
using CellPointerT = IndexT;
#include "../dataset.h"
__managed__ CellBase **dev_cells;
// Need 2 arrays of both, so we can swap.
__device__ int *d_Car_active;
__device__ int *d_Car_active_2;
__managed__ CarBase **dev_cars;
__managed__ CarBase **dev_cars_2;
// For prefix sum array compaction.
__device__ int *d_prefix_sum_temp;
__device__ int *d_prefix_sum_output;
int *h_prefix_sum_temp;
int *h_prefix_sum_output;
int *h_Car_active;
int *h_Car_active_2;
__device__ int d_num_cells;
__device__ int d_num_cars;
__device__ int d_num_cars_2;
int host_num_cells;
int host_num_cars;
// TODO: Consider migrating to SoaAlloc.
TrafficLight *h_traffic_lights;
__managed__ TrafficLightBase **d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float *dev_Cell_pos_x;
__device__ float *dev_Cell_pos_y;
__device__ bool *dev_Cell_occupied;
float *host_Cell_pos_x;
float *host_Cell_pos_y;
bool *host_Cell_occupied;
float *host_data_Cell_pos_x;
float *host_data_Cell_pos_y;
bool *host_data_Cell_occupied;
// coal ready
__device__ void Car_step_extend_path(IndexT self) {
void **vtable;
//if(self == 0 ) printf("-----------$self %p \n",dev_cars[self]);
COAL_CarBase_get_position(dev_cars[self]);
CellBase *cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
CellBase *next_cell;
COAL_CarBase_get_velocity(dev_cars[self]);
for (int i = 0; i < CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity(); ++i) {
COAL_CellBase_get_is_target(cell);
bool cond = CLEANPTR( cell ,CellBase *)->get_is_target();
COAL_CellBase_is_sink(cell);
if (CLEANPTR( cell ,CellBase *)->is_sink() || cond) {
break;
}
COAL_CarBase_next_step(dev_cars[self]);
next_cell = CLEANPTR( dev_cars[self] ,CarBase *)->next_step(cell);
assert(next_cell != cell);
// printf ("hiiiii %p %d\n",next_cell, self);
COAL_CellBase_is_free(next_cell);
if (!(CLEANPTR( next_cell ,CellBase *)->is_free())) break;
cell = next_cell;
COAL_CarBase_set_path(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_path(cell, i);
COAL_CarBase_get_path_length(dev_cars[self]);
int path_len = CLEANPTR( dev_cars[self] ,CarBase *)->get_path_length();
COAL_CarBase_set_path_length(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_path_length(path_len + 1);
}
COAL_CarBase_get_path_length(dev_cars[self]);
int path_len = CLEANPTR( dev_cars[self] ,CarBase *)->get_path_length();
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(path_len);
}
__device__ void Car_step_constraint_velocity(IndexT self) {
void **vtable;
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
COAL_CarBase_get_velocity(dev_cars[self]);
int vel = CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity();
COAL_CarBase_get_position(dev_cars[self]);
CellBase *cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CellBase_get_current_max_velocity(cell);
if (vel > CLEANPTR( cell ,CellBase *)->get_current_max_velocity()) {
COAL_CellBase_get_current_max_velocity(cell);
int max_velocity = CLEANPTR( cell ,CellBase *)->get_current_max_velocity();
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(max_velocity);
}
int path_index = 0;
int distance = 1;
COAL_CarBase_get_velocity(dev_cars[self]);
while (distance <= CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity()) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
COAL_CarBase_get_path(dev_cars[self]);
CellBase *next_cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_path(path_index);
COAL_CellBase_is_free(next_cell);
// Avoid collision.
if (!CLEANPTR( next_cell ,CellBase *)->is_free()) {
// Cannot enter cell.
--distance;
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(distance);
break;
} // else: Can enter next cell.
COAL_CarBase_get_velocity(dev_cars[self]);
int curr_vel = CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity();
COAL_CellBase_get_current_max_velocity(next_cell);
if (curr_vel > CLEANPTR( next_cell ,CellBase *)->get_current_max_velocity()) {
// Car is too fast for this cell.
COAL_CellBase_get_current_max_velocity(next_cell);
if (CLEANPTR( next_cell ,CellBase *)->get_current_max_velocity() > distance - 1) {
// Even if we slow down, we would still make progress.
COAL_CellBase_get_current_max_velocity(next_cell);
int max = CLEANPTR( next_cell ,CellBase *)->get_current_max_velocity();
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(max);
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(distance);
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
COAL_CarBase_get_velocity(dev_cars[self]);
for (int i = 0; i < CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity(); ++i) {
COAL_CarBase_get_path(dev_cars[self]);
CellBase *path = CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i);
COAL_CellBase_is_free(path);
assert(CLEANPTR( path ,CellBase *)->is_free());
COAL_CarBase_get_path(dev_cars[self]);
assert(i == 0 || CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i - 1) != path);
}
// TODO: Check why the cast is necessary.
COAL_CarBase_get_velocity(dev_cars[self]);
assert(distance <= CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity());
#endif // NDEBUG
}
__device__ void Car_step_move(IndexT self) {
void **vtable;
COAL_CarBase_get_position(dev_cars[self]);
CellBase *cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CarBase_get_velocity(dev_cars[self]);
for (int i = 0; i < CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity(); ++i) {
COAL_CarBase_get_path(dev_cars[self]);
assert(CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i) != cell);
COAL_CarBase_get_path(dev_cars[self]);
cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i);
COAL_CellBase_is_free(cell);
assert(CLEANPTR( cell ,CellBase *)->is_free());
COAL_CarBase_get_position(dev_cars[self]);
CellBase *ptr = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CellBase_release(ptr);
CLEANPTR( ptr ,CellBase *)->release();
COAL_CellBase_occupy(cell);
CLEANPTR( cell ,CellBase *)->occupy(dev_cars[self]);
COAL_CarBase_set_position(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_position(cell);
}
COAL_CarBase_get_position(dev_cars[self]);
CellBase *ptr = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CellBase_is_sink(ptr);
bool cond = CLEANPTR( ptr ,CellBase *)->is_sink();
COAL_CellBase_get_is_target(ptr);
if (cond || CLEANPTR( ptr ,CellBase *)->get_is_target()) {
// Remove car from the simulation. Will be added again in the next
// iteration.
COAL_CellBase_release(ptr);
CLEANPTR( ptr ,CellBase *)->release();
COAL_CarBase_set_position(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_position(nullptr);
d_Car_active[self] = 0;
}
}
__device__ void Car_step_slow_down(IndexT self) {
void **vtable;
// 20% chance of slowdown.
CarBase *ptr = dev_cars[self];
COAL_CarBase_get_velocity(ptr);
int vel = CLEANPTR( ptr ,CarBase *)->get_velocity();
COAL_CarBase_random_uni(ptr);
if (CLEANPTR( ptr ,CarBase *)->random_uni() < 0.2 && vel > 0) {
COAL_CarBase_set_velocity(ptr);
CLEANPTR( ptr ,CarBase *)->set_velocity(vel - 1);
}
}
__device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) {
void **vtable;
IndexT idx = atomicAdd(&d_num_cars, 1);
assert(idx >= 0 && idx < kMaxNumCars);
CarBase *ptr = dev_cars[idx];
assert(!d_Car_active[idx]);
COAL_CarBase_set_position(ptr);
CLEANPTR( ptr ,CarBase *)->set_position(dev_cells[cell]);
COAL_CarBase_set_path_length(ptr);
CLEANPTR( ptr ,CarBase *)->set_path_length(0);
COAL_CarBase_set_velocity(ptr);
CLEANPTR( ptr ,CarBase *)->set_velocity(0);
COAL_CarBase_set_max_velocity(ptr);
CLEANPTR( ptr ,CarBase *)->set_max_velocity(max_velocity);
d_Car_active[idx] = 1;
COAL_CellBase_is_free(dev_cells[cell]);
assert(CLEANPTR( dev_cells[cell] ,CellBase *)->is_free());
COAL_CellBase_occupy(dev_cells[cell]);
CLEANPTR( dev_cells[cell] ,CellBase *)->occupy(dev_cars[idx]);
hiprand_init(seed, 0, 0, &CLEANPTR( ptr ,CarBase *)->random_state);
return idx;
}
__device__ void ProducerCell_create_car(IndexT self) {
void **vtable;
assert(CLEANPTR( dev_cells[self] ,CellBase *)->type == kCellTypeProducer);
COAL_CellBase_is_free(dev_cells[self]);
if (CLEANPTR( dev_cells[self] ,CellBase *)->is_free()) {
float r = hiprand_uniform(&CLEANPTR( dev_cells[self] ,CellBase *)->random_state);
if (r < kCarAllocationRatio) {
IndexT new_car = new_Car(
/*seed=*/hiprand(&CLEANPTR( dev_cells[self] ,CellBase *)->random_state), /*cell=*/self,
/*max_velocity=*/hiprand(&CLEANPTR( dev_cells[self] ,CellBase *)->random_state) %
(kMaxVelocity / 2) +
kMaxVelocity / 2);
}
}
}
__device__ IndexT new_Cell(int max_velocity, float x, float y) {
IndexT idx = atomicAdd(&d_num_cells, 1);
CLEANPTR(dev_cells[idx],CellBase *)->car = nullptr;
CLEANPTR(dev_cells[idx],CellBase *)->max_velocity = max_velocity;
CLEANPTR(dev_cells[idx],CellBase *)->current_max_velocity = max_velocity;
CLEANPTR(dev_cells[idx],CellBase *)->num_incoming = 0;
CLEANPTR(dev_cells[idx],CellBase *)->num_outgoing = 0;
CLEANPTR(dev_cells[idx],CellBase *)->x = x;
CLEANPTR(dev_cells[idx],CellBase *)->y = y;
CLEANPTR(dev_cells[idx],CellBase *)->is_target = false;
CLEANPTR(dev_cells[idx],CellBase *)->type = kCellTypeNormal;
return idx;
}
__device__ IndexT new_ProducerCell(int max_velocity, float x, float y,
int seed) {
IndexT idx = new_Cell(max_velocity, x, y);
CLEANPTR(dev_cells[idx],CellBase *)->type = kCellTypeProducer;
hiprand_init(seed, 0, 0, &CLEANPTR(dev_cells[idx],CellBase *)->random_state);
return idx;
}
__global__ void kernel_traffic_light_step() {
void **vtable;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
TrafficLightBase *ptr = d_traffic_lights[i];
COAL_TrafficLightBase_get_num_cells(ptr);
if (CLEANPTR( ptr ,TrafficLightBase *)->get_num_cells() > 0) {
COAL_TrafficLightBase_get_timer(ptr);
int timer = CLEANPTR( ptr ,TrafficLightBase *)->get_timer();
COAL_TrafficLightBase_get_phase_time(ptr);
int phase_time = CLEANPTR( ptr ,TrafficLightBase *)->get_phase_time();
COAL_TrafficLightBase_set_timer(ptr);
CLEANPTR( ptr ,TrafficLightBase *)->set_timer((timer + 1) % phase_time);
COAL_TrafficLightBase_get_timer(ptr);
if (CLEANPTR( ptr ,TrafficLightBase *)->get_timer() == 0) {
COAL_TrafficLightBase_get_phase(ptr);
int phase = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_cell(ptr);
assert(CLEANPTR( ptr ,TrafficLightBase *)->get_cell(phase) != nullptr);
COAL_TrafficLightBase_get_phase(ptr);
phase = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_cell(ptr);
CellBase *ptr2 = CLEANPTR( ptr ,TrafficLightBase *)->get_cell(phase);
COAL_CellBase_set_current_max_velocity(ptr2);
CLEANPTR( ptr2 ,CellBase *)->set_current_max_velocity(0);
COAL_TrafficLightBase_get_phase(ptr);
int phase_2 = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_num_cells(ptr);
int num_cells = CLEANPTR( ptr ,TrafficLightBase *)->get_num_cells();
COAL_TrafficLightBase_set_phase(ptr);
CLEANPTR( ptr ,TrafficLightBase *)->set_phase((phase_2 + 1) % num_cells);
COAL_TrafficLightBase_get_phase(ptr);
phase_2 = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_cell(ptr);
ptr2 = CLEANPTR( ptr ,TrafficLightBase *)->get_cell(phase_2);
COAL_CellBase_remove_speed_limit(ptr2);
CLEANPTR( ptr2 ,CellBase *)->remove_speed_limit();
}
}
// d_traffic_lights[i]->step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new_Cell(
/*max_velocity=*/hiprand(&state) % (kMaxVelocity / 2) +
kMaxVelocity / 2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ IndexT connect_intersections(IndexT from, Node *target,
int incoming_idx,
hiprandState_t &state) {
// Create edge.
float dx = target->x - CLEANPTR( dev_cells[from] ,CellBase *)->x;
float dy = target->y - CLEANPTR( dev_cells[from] ,CellBase *)->y;
float dist = sqrt(dx * dx + dy * dy);
int steps = dist / kCellLength;
float step_x = dx / steps;
float step_y = dy / steps;
IndexT prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = CLEANPTR( dev_cells[from] ,CellBase *)->x + j * step_x;
float new_y = CLEANPTR( dev_cells[from] ,CellBase *)->y + j * step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
IndexT next;
if (hiprand_uniform(&state) < kProducerRatio) {
next = new_ProducerCell(CLEANPTR( dev_cells[prev] ,CellBase *)->max_velocity, new_x, new_y,
hiprand(&state));
} else {
next = new_Cell(CLEANPTR( dev_cells[prev] ,CellBase *)->max_velocity, new_x, new_y);
}
if (hiprand_uniform(&state) < kTargetRatio) {
CLEANPTR( dev_cells[next] ,CellBase *)->set_target();
}
CLEANPTR( dev_cells[prev] ,CellBase *)->set_num_outgoing(1);
CLEANPTR( dev_cells[prev] ,CellBase *)->set_outgoing(0,dev_cells[next] );
CLEANPTR( dev_cells[next] ,CellBase *)->set_num_incoming(1);
CLEANPTR( dev_cells[next] ,CellBase *)->set_incoming(0, dev_cells[prev]);
prev = next;
}
// Connect to all outgoing nodes of target.
CLEANPTR( dev_cells[prev] ,CellBase *)->set_num_outgoing(target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
IndexT next = target->cell_out[i];
// num_incoming set later.
CLEANPTR( dev_cells[prev] ,CellBase *)->set_outgoing(i, dev_cells[next] );
CLEANPTR( dev_cells[next] ,CellBase *)->set_incoming(incoming_idx, dev_cells[prev] );
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
IndexT last = connect_intersections(
d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state);
CLEANPTR(dev_cells[last],CellBase *)->set_current_max_velocity(0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
new (CLEANPTR(d_traffic_lights[i], TrafficLightBase *)) TrafficLight(
/*num_cells=*/d_nodes[i].num_incoming,
/*phase_time=*/5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
CLEANPTR( dev_cells[d_nodes[i].cell_out[j]],CellBase *) ->set_num_incoming(
d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
CLEANPTR(d_traffic_lights[i], TrafficLightBase *)->set_cell(j, dev_cells[d_nodes[i].cell_in[j]]);
CLEANPTR(dev_cells[d_nodes[i].cell_in[j]],CellBase *)->set_current_max_velocity(
0); // Set to "red".
}
}
}
template <class Type, class TypeBase>
__global__ void device_alloc(TypeBase **ptr, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
ptr[i] = new Type();
assert(ptr[i] != nullptr);
}
}
void create_street_network(obj_alloc *alloc) {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_nodes, sizeof(Node) * kNumIntersections);
hipMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node *), 0,
hipMemcpyHostToDevice);
// hipMalloc(&d_traffic_lights, sizeof(TrafficLight *) *
// kNumIntersections);
// device_alloc<TrafficLight, TrafficLightBase>
// <<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(d_traffic_lights, kNumIntersections);
// gpuErrchk(hipDeviceSynchronize());
d_traffic_lights = (TrafficLightBase **)alloc->calloc<TrafficLightBase *>(
kNumIntersections);
for (int i = 0; i < kNumIntersections; i += 1) {
d_traffic_lights[i] = (TrafficLightBase *)alloc->my_new<TrafficLight>();
}
alloc->toDevice();
// Create basic structure on host.
create_network_structure();
hipLaunchKernelGGL(( kernel_create_nodes), (kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_edges), (kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_traffic_lights), (kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// Allocate helper data structures for rendering.
hipMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0,
hipMemcpyDeviceToHost);
hipMalloc(&host_Cell_pos_x, sizeof(float) * host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float *), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_pos_y, sizeof(float) * host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float *), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_occupied, sizeof(bool) * host_num_cells);
hipMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool *),
0, hipMemcpyHostToDevice);
host_data_Cell_pos_x = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_pos_y = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_occupied = (bool *)malloc(sizeof(bool) * host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
hipLaunchKernelGGL(( kernel_traffic_light_step), (kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__device__ void Cell_add_to_rendering_array(IndexT self) {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = dev_cells[self]->x;
dev_Cell_pos_y[idx] = dev_cells[self]->y;
dev_Cell_occupied[idx] = !dev_cells[self]->is_free();
}
__global__ void kernel_Cell_add_to_rendering_array() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
Cell_add_to_rendering_array(i);
}
}
void transfer_data() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_Cell_add_to_rendering_array), (host_num_cells + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float) * host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float) * host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool) * host_num_cells, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
__global__ void kernel_ProducerCell_create_car() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
if (CLEANPTR( dev_cells[i] ,CellBase *)->type == kCellTypeProducer) {
ProducerCell_create_car(i);
}
}
}
__device__ void Car_step_prepare_path(IndexT self) {
void **vtable;
COAL_CarBase_step_initialize_iteration(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->step_initialize_iteration();
COAL_CarBase_step_accelerate(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->step_accelerate();
Car_step_extend_path(self);
Car_step_constraint_velocity(self);
Car_step_slow_down(self);
}
__global__ void kernel_Car_step_prepare_path() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_prepare_path(i);
}
}
}
__global__ void kernel_fill_car_indices() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
d_Car_active[i] = 0;
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_Car_step_move() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_move(i);
}
}
}
__device__ int d_checksum;
__global__ void kernel_compute_checksum() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
atomicAdd(&d_checksum, 1);
}
}
}
int checksum() {
int zero = 0;
hipMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_compute_checksum), dim3(128), dim3(128), 0, 0, );
int result;
hipMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0,
hipMemcpyDeviceToHost);
return result;
}
void step() {
hipLaunchKernelGGL(( kernel_ProducerCell_create_car), (host_num_cells + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
hipMemcpyDeviceToHost);
step_traffic_lights();
hipLaunchKernelGGL(( kernel_Car_step_prepare_path),
dim3((host_num_cars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Car_step_move), dim3((host_num_cars + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
void allocate_memory(obj_alloc *alloc) {
// hipMalloc(&dev_cells, sizeof(Cell *) * kMaxNumCells);
dev_cells = (CellBase **)alloc->calloc<CellBase *>(kMaxNumCells);
for (int i = 0; i < kMaxNumCells; i += 1) {
dev_cells[i] = (CellBase *)alloc->my_new<Cell>();
}
for (int i = 0; i < kMaxNumCells; i += 1) {
(dummy *)alloc->my_new<dummy>();
}
// device_alloc<Cell, CellBase>
// <<<(kMaxNumCells + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(
// dev_cells, kMaxNumCells);
gpuErrchk(hipDeviceSynchronize());
// hipMalloc(&dev_cars, sizeof(Car *) * kMaxNumCars);
// hipMalloc(&dev_cars_2, sizeof(Car *) * kMaxNumCars);
dev_cars = (CarBase **)alloc->calloc<CarBase *>(kMaxNumCars);
for (int i = 0; i < kMaxNumCars; i += 1) {
dev_cars[i] = (CarBase *)alloc->my_new<Car>();
}
dev_cars_2 = (CarBase **)alloc->calloc<CarBase *>(kMaxNumCars);
for (int i = 0; i < kMaxNumCars; i += 1) {
dev_cars_2[i] = (CarBase *)alloc->my_new<Car>();
}
alloc->toDevice();
// device_alloc<Car, CarBase>
// <<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(
// dev_cars, kMaxNumCars);
// gpuErrchk(hipDeviceSynchronize());
// device_alloc<Car>
// <<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(
// dev_cars_2, kMaxNumCars);
gpuErrchk(hipDeviceSynchronize());
hipMalloc(&h_Car_active, sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_Car_active, &h_Car_active, sizeof(int *), 0,
hipMemcpyHostToDevice);
// Car *h_cars_2;
// hipMalloc(&h_cars_2, sizeof(Car) * kMaxNumCars);
// hipMemcpyToSymbol(dev_cars_2, &h_cars_2, sizeof(Car *), 0,
// hipMemcpyHostToDevice);
hipMalloc(&h_Car_active_2, sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_Car_active_2, &h_Car_active_2, sizeof(int *), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_prefix_sum_temp, 3 * sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_prefix_sum_temp, &h_prefix_sum_temp, sizeof(int *), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_prefix_sum_output, sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_prefix_sum_output, &h_prefix_sum_output, sizeof(int *),
0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_fill_car_indices), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
int zero = 0;
hipMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_num_cars, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
}
__global__ void kernel_compact_initialize() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kMaxNumCars;
i += blockDim.x * gridDim.x) {
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_compact_cars() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
int target = d_prefix_sum_output[i];
// Copy i --> target.
// dev_cars_2[target] = dev_cars[i];
memcpy(CLEANPTR( dev_cars_2[target] ,CarBase *), CLEANPTR( dev_cars[i] ,CarBase *), sizeof(Car));
d_Car_active_2[target] = 1;
// Update pointer in Cell.
CLEANPTR(CLEANPTR( dev_cars[i] ,CarBase *)->position,CellBase *)->car = dev_cars[target];
atomicAdd(&d_num_cars_2, 1);
}
}
}
__global__ void kernel_compact_swap_pointers() {
{
auto *tmp = dev_cars;
dev_cars = dev_cars_2;
dev_cars_2 = tmp;
}
{
auto *tmp = d_Car_active;
d_Car_active = d_Car_active_2;
d_Car_active_2 = tmp;
}
d_num_cars = d_num_cars_2;
}
void compact_car_array() {
int zero = 0;
hipMemcpyToSymbol(d_num_cars_2, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
hipMemcpyDeviceToHost);
// TODO: Prefix sum broken for num_objects < 256.
auto prefix_sum_size = host_num_cars < 256 ? 256 : host_num_cars;
size_t temp_size = 3 * kMaxNumCars;
hipcub::DeviceScan::ExclusiveSum(h_prefix_sum_temp, temp_size, h_Car_active,
h_prefix_sum_output, prefix_sum_size);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_compact_initialize),
dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_compact_cars), dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_compact_swap_pointers), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
auto *tmp = h_Car_active;
h_Car_active = h_Car_active_2;
h_Car_active_2 = tmp;
}
void init(obj_alloc *alloc) {
allocate_memory(alloc);
create_street_network(alloc);
}
int main(int /*argc*/, char ** argv) {
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[1]));
init(&my_obj_alloc);
my_obj_alloc.toDevice();
printf("mem alloc done\n");
my_obj_alloc.create_table();
vfun_table = my_obj_alloc.get_vfun_table();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
//printf("%d\n",i);
step();
compact_car_array();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu\n", millis);
} | df43e82fe53dfa56032baac6379904f177689d93.cu | #include "traffic.h"
static const int kNumBlockSize = 256;
static const char kCellTypeNormal = 1;
static const char kCellTypeProducer = 2;
using IndexT = int;
using CellPointerT = IndexT;
#include "../dataset.h"
__managed__ CellBase **dev_cells;
// Need 2 arrays of both, so we can swap.
__device__ int *d_Car_active;
__device__ int *d_Car_active_2;
__managed__ CarBase **dev_cars;
__managed__ CarBase **dev_cars_2;
// For prefix sum array compaction.
__device__ int *d_prefix_sum_temp;
__device__ int *d_prefix_sum_output;
int *h_prefix_sum_temp;
int *h_prefix_sum_output;
int *h_Car_active;
int *h_Car_active_2;
__device__ int d_num_cells;
__device__ int d_num_cars;
__device__ int d_num_cars_2;
int host_num_cells;
int host_num_cars;
// TODO: Consider migrating to SoaAlloc.
TrafficLight *h_traffic_lights;
__managed__ TrafficLightBase **d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float *dev_Cell_pos_x;
__device__ float *dev_Cell_pos_y;
__device__ bool *dev_Cell_occupied;
float *host_Cell_pos_x;
float *host_Cell_pos_y;
bool *host_Cell_occupied;
float *host_data_Cell_pos_x;
float *host_data_Cell_pos_y;
bool *host_data_Cell_occupied;
// coal ready
__device__ void Car_step_extend_path(IndexT self) {
void **vtable;
//if(self == 0 ) printf("-----------$self %p \n",dev_cars[self]);
COAL_CarBase_get_position(dev_cars[self]);
CellBase *cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
CellBase *next_cell;
COAL_CarBase_get_velocity(dev_cars[self]);
for (int i = 0; i < CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity(); ++i) {
COAL_CellBase_get_is_target(cell);
bool cond = CLEANPTR( cell ,CellBase *)->get_is_target();
COAL_CellBase_is_sink(cell);
if (CLEANPTR( cell ,CellBase *)->is_sink() || cond) {
break;
}
COAL_CarBase_next_step(dev_cars[self]);
next_cell = CLEANPTR( dev_cars[self] ,CarBase *)->next_step(cell);
assert(next_cell != cell);
// printf ("hiiiii %p %d\n",next_cell, self);
COAL_CellBase_is_free(next_cell);
if (!(CLEANPTR( next_cell ,CellBase *)->is_free())) break;
cell = next_cell;
COAL_CarBase_set_path(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_path(cell, i);
COAL_CarBase_get_path_length(dev_cars[self]);
int path_len = CLEANPTR( dev_cars[self] ,CarBase *)->get_path_length();
COAL_CarBase_set_path_length(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_path_length(path_len + 1);
}
COAL_CarBase_get_path_length(dev_cars[self]);
int path_len = CLEANPTR( dev_cars[self] ,CarBase *)->get_path_length();
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(path_len);
}
__device__ void Car_step_constraint_velocity(IndexT self) {
void **vtable;
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
COAL_CarBase_get_velocity(dev_cars[self]);
int vel = CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity();
COAL_CarBase_get_position(dev_cars[self]);
CellBase *cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CellBase_get_current_max_velocity(cell);
if (vel > CLEANPTR( cell ,CellBase *)->get_current_max_velocity()) {
COAL_CellBase_get_current_max_velocity(cell);
int max_velocity = CLEANPTR( cell ,CellBase *)->get_current_max_velocity();
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(max_velocity);
}
int path_index = 0;
int distance = 1;
COAL_CarBase_get_velocity(dev_cars[self]);
while (distance <= CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity()) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
COAL_CarBase_get_path(dev_cars[self]);
CellBase *next_cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_path(path_index);
COAL_CellBase_is_free(next_cell);
// Avoid collision.
if (!CLEANPTR( next_cell ,CellBase *)->is_free()) {
// Cannot enter cell.
--distance;
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(distance);
break;
} // else: Can enter next cell.
COAL_CarBase_get_velocity(dev_cars[self]);
int curr_vel = CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity();
COAL_CellBase_get_current_max_velocity(next_cell);
if (curr_vel > CLEANPTR( next_cell ,CellBase *)->get_current_max_velocity()) {
// Car is too fast for this cell.
COAL_CellBase_get_current_max_velocity(next_cell);
if (CLEANPTR( next_cell ,CellBase *)->get_current_max_velocity() > distance - 1) {
// Even if we slow down, we would still make progress.
COAL_CellBase_get_current_max_velocity(next_cell);
int max = CLEANPTR( next_cell ,CellBase *)->get_current_max_velocity();
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(max);
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
COAL_CarBase_set_velocity(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_velocity(distance);
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
COAL_CarBase_get_velocity(dev_cars[self]);
for (int i = 0; i < CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity(); ++i) {
COAL_CarBase_get_path(dev_cars[self]);
CellBase *path = CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i);
COAL_CellBase_is_free(path);
assert(CLEANPTR( path ,CellBase *)->is_free());
COAL_CarBase_get_path(dev_cars[self]);
assert(i == 0 || CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i - 1) != path);
}
// TODO: Check why the cast is necessary.
COAL_CarBase_get_velocity(dev_cars[self]);
assert(distance <= CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity());
#endif // NDEBUG
}
__device__ void Car_step_move(IndexT self) {
void **vtable;
COAL_CarBase_get_position(dev_cars[self]);
CellBase *cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CarBase_get_velocity(dev_cars[self]);
for (int i = 0; i < CLEANPTR( dev_cars[self] ,CarBase *)->get_velocity(); ++i) {
COAL_CarBase_get_path(dev_cars[self]);
assert(CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i) != cell);
COAL_CarBase_get_path(dev_cars[self]);
cell = CLEANPTR( dev_cars[self] ,CarBase *)->get_path(i);
COAL_CellBase_is_free(cell);
assert(CLEANPTR( cell ,CellBase *)->is_free());
COAL_CarBase_get_position(dev_cars[self]);
CellBase *ptr = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CellBase_release(ptr);
CLEANPTR( ptr ,CellBase *)->release();
COAL_CellBase_occupy(cell);
CLEANPTR( cell ,CellBase *)->occupy(dev_cars[self]);
COAL_CarBase_set_position(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_position(cell);
}
COAL_CarBase_get_position(dev_cars[self]);
CellBase *ptr = CLEANPTR( dev_cars[self] ,CarBase *)->get_position();
COAL_CellBase_is_sink(ptr);
bool cond = CLEANPTR( ptr ,CellBase *)->is_sink();
COAL_CellBase_get_is_target(ptr);
if (cond || CLEANPTR( ptr ,CellBase *)->get_is_target()) {
// Remove car from the simulation. Will be added again in the next
// iteration.
COAL_CellBase_release(ptr);
CLEANPTR( ptr ,CellBase *)->release();
COAL_CarBase_set_position(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->set_position(nullptr);
d_Car_active[self] = 0;
}
}
__device__ void Car_step_slow_down(IndexT self) {
void **vtable;
// 20% chance of slowdown.
CarBase *ptr = dev_cars[self];
COAL_CarBase_get_velocity(ptr);
int vel = CLEANPTR( ptr ,CarBase *)->get_velocity();
COAL_CarBase_random_uni(ptr);
if (CLEANPTR( ptr ,CarBase *)->random_uni() < 0.2 && vel > 0) {
COAL_CarBase_set_velocity(ptr);
CLEANPTR( ptr ,CarBase *)->set_velocity(vel - 1);
}
}
__device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) {
void **vtable;
IndexT idx = atomicAdd(&d_num_cars, 1);
assert(idx >= 0 && idx < kMaxNumCars);
CarBase *ptr = dev_cars[idx];
assert(!d_Car_active[idx]);
COAL_CarBase_set_position(ptr);
CLEANPTR( ptr ,CarBase *)->set_position(dev_cells[cell]);
COAL_CarBase_set_path_length(ptr);
CLEANPTR( ptr ,CarBase *)->set_path_length(0);
COAL_CarBase_set_velocity(ptr);
CLEANPTR( ptr ,CarBase *)->set_velocity(0);
COAL_CarBase_set_max_velocity(ptr);
CLEANPTR( ptr ,CarBase *)->set_max_velocity(max_velocity);
d_Car_active[idx] = 1;
COAL_CellBase_is_free(dev_cells[cell]);
assert(CLEANPTR( dev_cells[cell] ,CellBase *)->is_free());
COAL_CellBase_occupy(dev_cells[cell]);
CLEANPTR( dev_cells[cell] ,CellBase *)->occupy(dev_cars[idx]);
curand_init(seed, 0, 0, &CLEANPTR( ptr ,CarBase *)->random_state);
return idx;
}
__device__ void ProducerCell_create_car(IndexT self) {
void **vtable;
assert(CLEANPTR( dev_cells[self] ,CellBase *)->type == kCellTypeProducer);
COAL_CellBase_is_free(dev_cells[self]);
if (CLEANPTR( dev_cells[self] ,CellBase *)->is_free()) {
float r = curand_uniform(&CLEANPTR( dev_cells[self] ,CellBase *)->random_state);
if (r < kCarAllocationRatio) {
IndexT new_car = new_Car(
/*seed=*/curand(&CLEANPTR( dev_cells[self] ,CellBase *)->random_state), /*cell=*/self,
/*max_velocity=*/curand(&CLEANPTR( dev_cells[self] ,CellBase *)->random_state) %
(kMaxVelocity / 2) +
kMaxVelocity / 2);
}
}
}
__device__ IndexT new_Cell(int max_velocity, float x, float y) {
IndexT idx = atomicAdd(&d_num_cells, 1);
CLEANPTR(dev_cells[idx],CellBase *)->car = nullptr;
CLEANPTR(dev_cells[idx],CellBase *)->max_velocity = max_velocity;
CLEANPTR(dev_cells[idx],CellBase *)->current_max_velocity = max_velocity;
CLEANPTR(dev_cells[idx],CellBase *)->num_incoming = 0;
CLEANPTR(dev_cells[idx],CellBase *)->num_outgoing = 0;
CLEANPTR(dev_cells[idx],CellBase *)->x = x;
CLEANPTR(dev_cells[idx],CellBase *)->y = y;
CLEANPTR(dev_cells[idx],CellBase *)->is_target = false;
CLEANPTR(dev_cells[idx],CellBase *)->type = kCellTypeNormal;
return idx;
}
__device__ IndexT new_ProducerCell(int max_velocity, float x, float y,
int seed) {
IndexT idx = new_Cell(max_velocity, x, y);
CLEANPTR(dev_cells[idx],CellBase *)->type = kCellTypeProducer;
curand_init(seed, 0, 0, &CLEANPTR(dev_cells[idx],CellBase *)->random_state);
return idx;
}
__global__ void kernel_traffic_light_step() {
void **vtable;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
TrafficLightBase *ptr = d_traffic_lights[i];
COAL_TrafficLightBase_get_num_cells(ptr);
if (CLEANPTR( ptr ,TrafficLightBase *)->get_num_cells() > 0) {
COAL_TrafficLightBase_get_timer(ptr);
int timer = CLEANPTR( ptr ,TrafficLightBase *)->get_timer();
COAL_TrafficLightBase_get_phase_time(ptr);
int phase_time = CLEANPTR( ptr ,TrafficLightBase *)->get_phase_time();
COAL_TrafficLightBase_set_timer(ptr);
CLEANPTR( ptr ,TrafficLightBase *)->set_timer((timer + 1) % phase_time);
COAL_TrafficLightBase_get_timer(ptr);
if (CLEANPTR( ptr ,TrafficLightBase *)->get_timer() == 0) {
COAL_TrafficLightBase_get_phase(ptr);
int phase = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_cell(ptr);
assert(CLEANPTR( ptr ,TrafficLightBase *)->get_cell(phase) != nullptr);
COAL_TrafficLightBase_get_phase(ptr);
phase = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_cell(ptr);
CellBase *ptr2 = CLEANPTR( ptr ,TrafficLightBase *)->get_cell(phase);
COAL_CellBase_set_current_max_velocity(ptr2);
CLEANPTR( ptr2 ,CellBase *)->set_current_max_velocity(0);
COAL_TrafficLightBase_get_phase(ptr);
int phase_2 = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_num_cells(ptr);
int num_cells = CLEANPTR( ptr ,TrafficLightBase *)->get_num_cells();
COAL_TrafficLightBase_set_phase(ptr);
CLEANPTR( ptr ,TrafficLightBase *)->set_phase((phase_2 + 1) % num_cells);
COAL_TrafficLightBase_get_phase(ptr);
phase_2 = CLEANPTR( ptr ,TrafficLightBase *)->get_phase();
COAL_TrafficLightBase_get_cell(ptr);
ptr2 = CLEANPTR( ptr ,TrafficLightBase *)->get_cell(phase_2);
COAL_CellBase_remove_speed_limit(ptr2);
CLEANPTR( ptr2 ,CellBase *)->remove_speed_limit();
}
}
// d_traffic_lights[i]->step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new_Cell(
/*max_velocity=*/curand(&state) % (kMaxVelocity / 2) +
kMaxVelocity / 2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ IndexT connect_intersections(IndexT from, Node *target,
int incoming_idx,
curandState_t &state) {
// Create edge.
float dx = target->x - CLEANPTR( dev_cells[from] ,CellBase *)->x;
float dy = target->y - CLEANPTR( dev_cells[from] ,CellBase *)->y;
float dist = sqrt(dx * dx + dy * dy);
int steps = dist / kCellLength;
float step_x = dx / steps;
float step_y = dy / steps;
IndexT prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = CLEANPTR( dev_cells[from] ,CellBase *)->x + j * step_x;
float new_y = CLEANPTR( dev_cells[from] ,CellBase *)->y + j * step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
IndexT next;
if (curand_uniform(&state) < kProducerRatio) {
next = new_ProducerCell(CLEANPTR( dev_cells[prev] ,CellBase *)->max_velocity, new_x, new_y,
curand(&state));
} else {
next = new_Cell(CLEANPTR( dev_cells[prev] ,CellBase *)->max_velocity, new_x, new_y);
}
if (curand_uniform(&state) < kTargetRatio) {
CLEANPTR( dev_cells[next] ,CellBase *)->set_target();
}
CLEANPTR( dev_cells[prev] ,CellBase *)->set_num_outgoing(1);
CLEANPTR( dev_cells[prev] ,CellBase *)->set_outgoing(0,dev_cells[next] );
CLEANPTR( dev_cells[next] ,CellBase *)->set_num_incoming(1);
CLEANPTR( dev_cells[next] ,CellBase *)->set_incoming(0, dev_cells[prev]);
prev = next;
}
// Connect to all outgoing nodes of target.
CLEANPTR( dev_cells[prev] ,CellBase *)->set_num_outgoing(target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
IndexT next = target->cell_out[i];
// num_incoming set later.
CLEANPTR( dev_cells[prev] ,CellBase *)->set_outgoing(i, dev_cells[next] );
CLEANPTR( dev_cells[next] ,CellBase *)->set_incoming(incoming_idx, dev_cells[prev] );
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
IndexT last = connect_intersections(
d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state);
CLEANPTR(dev_cells[last],CellBase *)->set_current_max_velocity(0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
new (CLEANPTR(d_traffic_lights[i], TrafficLightBase *)) TrafficLight(
/*num_cells=*/d_nodes[i].num_incoming,
/*phase_time=*/5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
CLEANPTR( dev_cells[d_nodes[i].cell_out[j]],CellBase *) ->set_num_incoming(
d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
CLEANPTR(d_traffic_lights[i], TrafficLightBase *)->set_cell(j, dev_cells[d_nodes[i].cell_in[j]]);
CLEANPTR(dev_cells[d_nodes[i].cell_in[j]],CellBase *)->set_current_max_velocity(
0); // Set to "red".
}
}
}
template <class Type, class TypeBase>
__global__ void device_alloc(TypeBase **ptr, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
ptr[i] = new Type();
assert(ptr[i] != nullptr);
}
}
void create_street_network(obj_alloc *alloc) {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_nodes, sizeof(Node) * kNumIntersections);
cudaMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node *), 0,
cudaMemcpyHostToDevice);
// cudaMalloc(&d_traffic_lights, sizeof(TrafficLight *) *
// kNumIntersections);
// device_alloc<TrafficLight, TrafficLightBase>
// <<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(d_traffic_lights, kNumIntersections);
// gpuErrchk(cudaDeviceSynchronize());
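  // Traffic lights are now allocated through the obj_alloc object allocator on
  // the host (instead of the device-side device_alloc kernel above) and handed
  // to the GPU via alloc->toDevice().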
d_traffic_lights = (TrafficLightBase **)alloc->calloc<TrafficLightBase *>(
kNumIntersections);
for (int i = 0; i < kNumIntersections; i += 1) {
d_traffic_lights[i] = (TrafficLightBase *)alloc->my_new<TrafficLight>();
}
alloc->toDevice();
// Create basic structure on host.
create_network_structure();
kernel_create_nodes<<<(kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_edges<<<(kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_traffic_lights<<<(kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
// Allocate helper data structures for rendering.
cudaMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0,
cudaMemcpyDeviceToHost);
cudaMalloc(&host_Cell_pos_x, sizeof(float) * host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_pos_y, sizeof(float) * host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_occupied, sizeof(bool) * host_num_cells);
cudaMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool *),
0, cudaMemcpyHostToDevice);
host_data_Cell_pos_x = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_pos_y = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_occupied = (bool *)malloc(sizeof(bool) * host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
kernel_traffic_light_step<<<(kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__device__ void Cell_add_to_rendering_array(IndexT self) {
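  // Atomically claim the next free slot in the rendering arrays, then record this
  // cell's position and occupancy there.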
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = dev_cells[self]->x;
dev_Cell_pos_y[idx] = dev_cells[self]->y;
dev_Cell_occupied[idx] = !dev_cells[self]->is_free();
}
__global__ void kernel_Cell_add_to_rendering_array() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
Cell_add_to_rendering_array(i);
}
}
void transfer_data() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
kernel_Cell_add_to_rendering_array<<<(host_num_cells + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float) * host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float) * host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool) * host_num_cells, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void kernel_ProducerCell_create_car() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
if (CLEANPTR( dev_cells[i] ,CellBase *)->type == kCellTypeProducer) {
ProducerCell_create_car(i);
}
}
}
__device__ void Car_step_prepare_path(IndexT self) {
void **vtable;
COAL_CarBase_step_initialize_iteration(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->step_initialize_iteration();
COAL_CarBase_step_accelerate(dev_cars[self]);
CLEANPTR( dev_cars[self] ,CarBase *)->step_accelerate();
Car_step_extend_path(self);
Car_step_constraint_velocity(self);
Car_step_slow_down(self);
}
__global__ void kernel_Car_step_prepare_path() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_prepare_path(i);
}
}
}
__global__ void kernel_fill_car_indices() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
d_Car_active[i] = 0;
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_Car_step_move() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_move(i);
}
}
}
__device__ int d_checksum;
__global__ void kernel_compute_checksum() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
atomicAdd(&d_checksum, 1);
}
}
}
int checksum() {
int zero = 0;
cudaMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
kernel_compute_checksum<<<128, 128>>>();
int result;
cudaMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0,
cudaMemcpyDeviceToHost);
return result;
}
void step() {
kernel_ProducerCell_create_car<<<(host_num_cells + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
cudaMemcpyDeviceToHost);
step_traffic_lights();
kernel_Car_step_prepare_path<<<
(host_num_cars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Car_step_move<<<(host_num_cars + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void allocate_memory(obj_alloc *alloc) {
// cudaMalloc(&dev_cells, sizeof(Cell *) * kMaxNumCells);
dev_cells = (CellBase **)alloc->calloc<CellBase *>(kMaxNumCells);
for (int i = 0; i < kMaxNumCells; i += 1) {
dev_cells[i] = (CellBase *)alloc->my_new<Cell>();
}
for (int i = 0; i < kMaxNumCells; i += 1) {
(dummy *)alloc->my_new<dummy>();
}
// device_alloc<Cell, CellBase>
// <<<(kMaxNumCells + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(
// dev_cells, kMaxNumCells);
gpuErrchk(cudaDeviceSynchronize());
// cudaMalloc(&dev_cars, sizeof(Car *) * kMaxNumCars);
// cudaMalloc(&dev_cars_2, sizeof(Car *) * kMaxNumCars);
dev_cars = (CarBase **)alloc->calloc<CarBase *>(kMaxNumCars);
for (int i = 0; i < kMaxNumCars; i += 1) {
dev_cars[i] = (CarBase *)alloc->my_new<Car>();
}
dev_cars_2 = (CarBase **)alloc->calloc<CarBase *>(kMaxNumCars);
for (int i = 0; i < kMaxNumCars; i += 1) {
dev_cars_2[i] = (CarBase *)alloc->my_new<Car>();
}
alloc->toDevice();
// device_alloc<Car, CarBase>
// <<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(
// dev_cars, kMaxNumCars);
// gpuErrchk(cudaDeviceSynchronize());
// device_alloc<Car>
// <<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
// kNumBlockSize>>>(
// dev_cars_2, kMaxNumCars);
gpuErrchk(cudaDeviceSynchronize());
cudaMalloc(&h_Car_active, sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_Car_active, &h_Car_active, sizeof(int *), 0,
cudaMemcpyHostToDevice);
// Car *h_cars_2;
// cudaMalloc(&h_cars_2, sizeof(Car) * kMaxNumCars);
// cudaMemcpyToSymbol(dev_cars_2, &h_cars_2, sizeof(Car *), 0,
// cudaMemcpyHostToDevice);
cudaMalloc(&h_Car_active_2, sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_Car_active_2, &h_Car_active_2, sizeof(int *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_prefix_sum_temp, 3 * sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_prefix_sum_temp, &h_prefix_sum_temp, sizeof(int *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_prefix_sum_output, sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_prefix_sum_output, &h_prefix_sum_output, sizeof(int *),
0, cudaMemcpyHostToDevice);
kernel_fill_car_indices<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
int zero = 0;
cudaMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_num_cars, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
}
__global__ void kernel_compact_initialize() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kMaxNumCars;
i += blockDim.x * gridDim.x) {
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_compact_cars() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
int target = d_prefix_sum_output[i];
// Copy i --> target.
// dev_cars_2[target] = dev_cars[i];
memcpy(CLEANPTR( dev_cars_2[target] ,CarBase *), CLEANPTR( dev_cars[i] ,CarBase *), sizeof(Car));
d_Car_active_2[target] = 1;
// Update pointer in Cell.
CLEANPTR(CLEANPTR( dev_cars[i] ,CarBase *)->position,CellBase *)->car = dev_cars[target];
atomicAdd(&d_num_cars_2, 1);
}
}
}
__global__ void kernel_compact_swap_pointers() {
{
auto *tmp = dev_cars;
dev_cars = dev_cars_2;
dev_cars_2 = tmp;
}
{
auto *tmp = d_Car_active;
d_Car_active = d_Car_active_2;
d_Car_active_2 = tmp;
}
d_num_cars = d_num_cars_2;
}
void compact_car_array() {
int zero = 0;
cudaMemcpyToSymbol(d_num_cars_2, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
cudaMemcpyDeviceToHost);
// TODO: Prefix sum broken for num_objects < 256.
auto prefix_sum_size = host_num_cars < 256 ? 256 : host_num_cars;
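  // Exclusive prefix sum over the active flags: each active car's scan value is
  // its destination index in the compacted car array.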
size_t temp_size = 3 * kMaxNumCars;
cub::DeviceScan::ExclusiveSum(h_prefix_sum_temp, temp_size, h_Car_active,
h_prefix_sum_output, prefix_sum_size);
gpuErrchk(cudaDeviceSynchronize());
kernel_compact_initialize<<<
(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_compact_cars<<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_compact_swap_pointers<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
auto *tmp = h_Car_active;
h_Car_active = h_Car_active_2;
h_Car_active_2 = tmp;
}
void init(obj_alloc *alloc) {
allocate_memory(alloc);
create_street_network(alloc);
}
int main(int /*argc*/, char ** argv) {
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[1]));
init(&my_obj_alloc);
my_obj_alloc.toDevice();
printf("mem alloc done\n");
my_obj_alloc.create_table();
vfun_table = my_obj_alloc.get_vfun_table();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
//printf("%d\n",i);
step();
compact_car_array();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
  auto micros =
      std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu\n", millis);
} |
c0f8a904fae6c74ebe89c8fb48cb21f570f842e9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Implements several functions declared in source/culinalg-cuheader.cuh
*/
#include<sources/culinalg-cuheader.cuh>
#include<headers/culinalg-exceptions.hpp>
void clg::copyCuData(const CuData& dst, const CuData& src, size_t count)
{
// Check that CuDatas point to valid data.
    if(!dst.host_data || !src.host_data || !dst.device_data || !src.device_data)
throw clg::CopyFailedException("CuData is invalid or points to no data");
// Perform copy
hipError_t err;
if(dst.host_data_synced)
{
if(src.host_data_synced)
err = hipMemcpy(dst.host_data, src.host_data, count, hipMemcpyHostToHost); //TODO bench
else
err = hipMemcpy(dst.host_data, src.device_data, count, hipMemcpyDeviceToHost);
}
else
{
if(src.host_data_synced)
err = hipMemcpy(dst.device_data, src.host_data, count, hipMemcpyHostToDevice);
        else //TODO bench the following
err = hipMemcpy(dst.device_data, src.device_data, count, hipMemcpyDeviceToDevice);
}
// Check for error
clg::wrapCudaError<clg::CopyFailedException>(err);
}
void clg::CuData::reset()
{
host_data = nullptr;
device_data = nullptr;
host_data_synced = true;
}
void clg::CuData::move_from(const CuData& src)
{
host_data = src.host_data;
device_data = src.device_data;
host_data_synced = src.host_data_synced;
}
void clg::CuData::memsync_host(size_t size)
{
// Early return
if(host_data_synced) return;
// Try copying. If copy fails, treating source as correct seems safe
clg::wrapCudaError<clg::CopyFailedException>(hipMemcpy(host_data, device_data, size,
hipMemcpyDeviceToHost));
// Set sync flags
host_data_synced = true;
}
void clg::CuData::memsync_device(size_t size)
{
// Early return
if(!host_data_synced) return;
    // Try copying. If copy fails, treating source as correct seems safe
clg::wrapCudaError<clg::CopyFailedException>(hipMemcpy(device_data, host_data, size,
hipMemcpyHostToDevice));
// Set sync flags
host_data_synced = false;
}
| c0f8a904fae6c74ebe89c8fb48cb21f570f842e9.cu | /**
* Implements several functions declared in source/culinalg-cuheader.cuh
*/
#include<sources/culinalg-cuheader.cuh>
#include<headers/culinalg-exceptions.hpp>
void clg::copyCuData(const CuData& dst, const CuData& src, size_t count)
{
// Check that CuDatas point to valid data.
    if(!dst.host_data || !src.host_data || !dst.device_data || !src.device_data)
throw clg::CopyFailedException("CuData is invalid or points to no data");
// Perform copy
cudaError_t err;
if(dst.host_data_synced)
{
if(src.host_data_synced)
err = cudaMemcpy(dst.host_data, src.host_data, count, cudaMemcpyHostToHost); //TODO bench
else
err = cudaMemcpy(dst.host_data, src.device_data, count, cudaMemcpyDeviceToHost);
}
else
{
if(src.host_data_synced)
err = cudaMemcpy(dst.device_data, src.host_data, count, cudaMemcpyHostToDevice);
        else //TODO bench the following
err = cudaMemcpy(dst.device_data, src.device_data, count, cudaMemcpyDeviceToDevice);
}
// Check for error
clg::wrapCudaError<clg::CopyFailedException>(err);
}
void clg::CuData::reset()
{
host_data = nullptr;
device_data = nullptr;
host_data_synced = true;
}
void clg::CuData::move_from(const CuData& src)
{
host_data = src.host_data;
device_data = src.device_data;
host_data_synced = src.host_data_synced;
}
void clg::CuData::memsync_host(size_t size)
{
// Early return
if(host_data_synced) return;
// Try copying. If copy fails, treating source as correct seems safe
clg::wrapCudaError<clg::CopyFailedException>(cudaMemcpy(host_data, device_data, size,
cudaMemcpyDeviceToHost));
// Set sync flags
host_data_synced = true;
}
void clg::CuData::memsync_device(size_t size)
{
// Early return
if(!host_data_synced) return;
    // Try copying. If copy fails, treating source as correct seems safe
clg::wrapCudaError<clg::CopyFailedException>(cudaMemcpy(device_data, host_data, size,
cudaMemcpyHostToDevice));
// Set sync flags
host_data_synced = false;
}
|
b72daaa231dfebca046acc01888832bf872e9e66.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdint.h>
#define WIDTH 512
#define HEIGHT 512
#define ITERS 512
#define N (WIDTH*HEIGHT)
#define max_size 4
#define max_colors 16
#define xmax 1.2f
#define xmin -2.0f
#define ymax 1.2f
#define ymin -1.2f
#define deltaP ((xmax - xmin)/512)
#define deltaQ ((ymax - ymin)/512)
extern "C" __global__ void mandelGen(uint8_t* output0)
{
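    // Escape-time iteration: each (blockIdx.x, threadIdx.x) pair maps to one
    // point c in a fixed window of the complex plane; z = z*z + c is iterated
    // until |z|^2 >= 4 or 512 iterations, and the iteration count (mod 16,
    // scaled by 16) becomes the output pixel value.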
extern __shared__ uint8_t sbase[];
uint32_t tid = threadIdx.x;
float v0;
float v1;
uint32_t v2;
float t3;
float t4;
uint32_t t5;
if (blockIdx.x < 512U) {
v0 = 0.0F;
v1 = 0.0F;
v2 = 1U;
while (v0 * v0 + v1 * v1 < 4.0F && v2 < 512U) {
t3 = v0;
t4 = v1;
t5 = v2;
v0 = t3 * t3 - t4 * t4 + (-0.7931140065193176F + (float) tid *
1.3693165965378284e-4F);
v1 = 2.0F * t3 * t4 + (0.1409740000963211F -
(float) blockIdx.x *
2.0146874885540456e-4F);
v2 = t5 + 1U;
}
output0[blockIdx.x * 512U + tid] = (uint8_t) v2 % 16U * 16U;
}
}
__global__ void kernel(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-2.0f+(((float)threadIdx.x)*6.25e-3f)));
v2 = (((2.0f*t6)*t5)+(1.2f-(((float)blockIdx.x)*4.6875e-3f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void plate1(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-0.69106f+(((float)threadIdx.x)*3.008172e-7f)));
v2 = (((2.0f*t6)*t5)+(0.387228f-(((float)blockIdx.x)*2.4418114e-7f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void plate2(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-0.793114f+(((float)threadIdx.x)*1.3693166e-4f)));
v2 = (((2.0f*t6)*t5)+(0.140974f-(((float)blockIdx.x)*2.0146875e-4f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void plate3(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-0.745464f+(((float)threadIdx.x)*1.4854595e-7f)));
v2 = (((2.0f*t6)*t5)+(0.11303f-(((float)blockIdx.x)*1.23051e-7f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void mandel(uint8_t *out) {
int bid = blockIdx.x;
int tid = threadIdx.x;
float x = 0.0, y = 0.0, xsq = 0.0, ysq = 0.0;
int color = 1;
while (color < ITERS && (xsq + ysq) < max_size) {
xsq = x*x;
ysq = y*y;
y = 2*x*y+(ymax - blockIdx.x*deltaQ);
x = xsq - ysq + (xmin + threadIdx.x * deltaP);
color ++;
}
out[bid* 512 + tid] = (color % 8) * 32; // % max_colors;
}
/* ------------------------------------------------------------------------
MAIN
--------------------------------------------------------------------- */
int main(void) {
uint8_t *r;
uint8_t *dr;
r = (uint8_t*)malloc(sizeof(uint8_t) * N);
hipMalloc((void**)&dr,N*sizeof(uint8_t));
hipMemset(dr,0,N*sizeof(uint8_t));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//mandel<<<HEIGHT,WIDTH,0>>>(dr);
//kernel<<<HEIGHT,WIDTH,0>>>(dr);
//plate1<<<HEIGHT,WIDTH,0>>>(dr);
//plate2<<<HEIGHT,WIDTH,0>>>(dr);
//plate3<<<HEIGHT,WIDTH,0>>>(dr);
hipLaunchKernelGGL(( mandelGen), dim3(HEIGHT),dim3(WIDTH),0, 0, dr);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// std::cout << std::endl;
hipMemcpy(r,dr,N*sizeof(uint8_t),hipMemcpyDeviceToHost);
//hipMemcpy(m,dm,64*sizeof(float),hipMemcpyDeviceToHost);
for (int i = 0; i < N; i ++) {
printf("%d ",r[i]);
}
printf("Elapsed time: %f\n", elapsedTime);
FILE *file;
file = fopen("image.out","w");
fwrite(r,sizeof(uint8_t),N,file);
fclose(file);
return 0;
}
| b72daaa231dfebca046acc01888832bf872e9e66.cu | #include <cuda.h>
#include <stdio.h>
#include <stdint.h>
#define WIDTH 512
#define HEIGHT 512
#define ITERS 512
#define N (WIDTH*HEIGHT)
#define max_size 4
#define max_colors 16
#define xmax 1.2f
#define xmin -2.0f
#define ymax 1.2f
#define ymin -1.2f
#define deltaP ((xmax - xmin)/512)
#define deltaQ ((ymax - ymin)/512)
extern "C" __global__ void mandelGen(uint8_t* output0)
{
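    // Escape-time iteration: each (blockIdx.x, threadIdx.x) pair maps to one
    // point c in a fixed window of the complex plane; z = z*z + c is iterated
    // until |z|^2 >= 4 or 512 iterations, and the iteration count (mod 16,
    // scaled by 16) becomes the output pixel value.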
extern __shared__ uint8_t sbase[];
uint32_t tid = threadIdx.x;
float v0;
float v1;
uint32_t v2;
float t3;
float t4;
uint32_t t5;
if (blockIdx.x < 512U) {
v0 = 0.0F;
v1 = 0.0F;
v2 = 1U;
while (v0 * v0 + v1 * v1 < 4.0F && v2 < 512U) {
t3 = v0;
t4 = v1;
t5 = v2;
v0 = t3 * t3 - t4 * t4 + (-0.7931140065193176F + (float) tid *
1.3693165965378284e-4F);
v1 = 2.0F * t3 * t4 + (0.1409740000963211F -
(float) blockIdx.x *
2.0146874885540456e-4F);
v2 = t5 + 1U;
}
output0[blockIdx.x * 512U + tid] = (uint8_t) v2 % 16U * 16U;
}
}
__global__ void kernel(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-2.0f+(((float)threadIdx.x)*6.25e-3f)));
v2 = (((2.0f*t6)*t5)+(1.2f-(((float)blockIdx.x)*4.6875e-3f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void plate1(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-0.69106f+(((float)threadIdx.x)*3.008172e-7f)));
v2 = (((2.0f*t6)*t5)+(0.387228f-(((float)blockIdx.x)*2.4418114e-7f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void plate2(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-0.793114f+(((float)threadIdx.x)*1.3693166e-4f)));
v2 = (((2.0f*t6)*t5)+(0.140974f-(((float)blockIdx.x)*2.0146875e-4f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void plate3(uint8_t* output0){
float v3;
float v2;
uint32_t v1;
v3 = 0.0f;
v2 = 0.0f;
v1 = 1;
while (((((v3*v3)+(v2*v2))<4.0f)&&(v1<512))){
float t6;
float t5;
uint32_t t4;
t6 = v3;
t5 = v2;
t4 = v1;
v3 = (((t6*t6)-(t5*t5))+(-0.745464f+(((float)threadIdx.x)*1.4854595e-7f)));
v2 = (((2.0f*t6)*t5)+(0.11303f-(((float)blockIdx.x)*1.23051e-7f)));
v1 = (t4+1);
}
output0[((blockIdx.x*512)+threadIdx.x)] = ((((uint8_t)v1)%16)*16);
}
__global__ void mandel(uint8_t *out) {
int bid = blockIdx.x;
int tid = threadIdx.x;
float x = 0.0, y = 0.0, xsq = 0.0, ysq = 0.0;
int color = 1;
while (color < ITERS && (xsq + ysq) < max_size) {
xsq = x*x;
ysq = y*y;
y = 2*x*y+(ymax - blockIdx.x*deltaQ);
x = xsq - ysq + (xmin + threadIdx.x * deltaP);
color ++;
}
out[bid* 512 + tid] = (color % 8) * 32; // % max_colors;
}
/* ------------------------------------------------------------------------
MAIN
--------------------------------------------------------------------- */
int main(void) {
uint8_t *r;
uint8_t *dr;
r = (uint8_t*)malloc(sizeof(uint8_t) * N);
cudaMalloc((void**)&dr,N*sizeof(uint8_t));
cudaMemset(dr,0,N*sizeof(uint8_t));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//mandel<<<HEIGHT,WIDTH,0>>>(dr);
//kernel<<<HEIGHT,WIDTH,0>>>(dr);
//plate1<<<HEIGHT,WIDTH,0>>>(dr);
//plate2<<<HEIGHT,WIDTH,0>>>(dr);
//plate3<<<HEIGHT,WIDTH,0>>>(dr);
mandelGen<<<HEIGHT,WIDTH,0>>>(dr);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// std::cout << std::endl;
cudaMemcpy(r,dr,N*sizeof(uint8_t),cudaMemcpyDeviceToHost);
//cudaMemcpy(m,dm,64*sizeof(float),cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i ++) {
printf("%d ",r[i]);
}
printf("Elapsed time: %f\n", elapsedTime);
FILE *file;
file = fopen("image.out","w");
fwrite(r,sizeof(uint8_t),N,file);
fclose(file);
return 0;
}
|
413bcefd52c6ab343bb1173ef0890eb2bf41c8fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if(ix < numCols && iy < numRows) {
int offset = ix + iy * numCols;
uchar4 pixel = rgbaImage[offset];
greyImage[offset] = .299f * pixel.x + .587f * pixel.y + .114f * pixel.z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
size_t BLOCKSIZE = 32;
const dim3 blockSize(BLOCKSIZE, BLOCKSIZE, 1); //TODO
const dim3 gridSize((numCols + BLOCKSIZE - 1) / BLOCKSIZE, (numRows + BLOCKSIZE - 1) / BLOCKSIZE, 1); //TODO
printf("numCols: %lu, numRows: %lu, grid.x: %d, grid.y: %d\n", numCols, numCols, gridSize.x, gridSize.y);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 413bcefd52c6ab343bb1173ef0890eb2bf41c8fe.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if(ix < numCols && iy < numRows) {
int offset = ix + iy * numCols;
uchar4 pixel = rgbaImage[offset];
greyImage[offset] = .299f * pixel.x + .587f * pixel.y + .114f * pixel.z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
size_t BLOCKSIZE = 32;
const dim3 blockSize(BLOCKSIZE, BLOCKSIZE, 1); //TODO
const dim3 gridSize((numCols + BLOCKSIZE - 1) / BLOCKSIZE, (numRows + BLOCKSIZE - 1) / BLOCKSIZE, 1); //TODO
printf("numCols: %lu, numRows: %lu, grid.x: %d, grid.y: %d\n", numCols, numCols, gridSize.x, gridSize.y);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
0aed965dc4e8b2f2983b4985275525f6e1892f2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
__global__ void Histogram(PPMImage *cuda_image, float *cuda_h) {
int j, k, l, x = 0;
int index = threadIdx.x + blockIdx.x * blockDim.x;
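    // One thread per pixel: the triple loop walks the 64 (red, green, blue) bins
    // in order, x is the flattened bin index, and the bin matching this pixel's
    // quantized color is incremented.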
for (j = 0; j <= 3; j++) {
for (k = 0; k <= 3; k++) {
for (l = 0; l <= 3; l++) {
if (cuda_image->data[index].red == j && cuda_image->data[index].green == k && cuda_image->data[index].blue == l) {
                    atomicAdd(&cuda_h[x], 1.0f); // bins are shared across threads, so the increment must be atomic
}
x++;
}
}
}
}
int main(int argc, char *argv[]) {
    if( argc != 2 ) {
        printf("Expected exactly one argument (the input PPM file).\n");
        return 1;
    }
double t_start, t_end;
int i;
char *filename = argv[1]; //Recebendo o arquivo!;
//scanf("%s", filename);
PPMImage *image = readPPM(filename);
PPMImage *cuda_image;
float *h = (float*)malloc(sizeof(float) * 64);
float *cuda_h;
//Inicializar h
for(i=0; i < 64; i++) h[i] = 0.0;
float n = image->y * image->x;
for (i = 0; i < n; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
    hipMalloc ((void **) &cuda_h, sizeof(float) * 64);
    hipMalloc ((void **) &cuda_image, sizeof(PPMImage));
    hipMemcpy (cuda_h, h, sizeof(float) * 64, hipMemcpyHostToDevice);
    // PPMImage carries a pointer to its pixel buffer, so the pixels have to be
    // copied to the device separately and the device-side struct patched to
    // point at them; copying only the struct would leave data pointing at host
    // memory.
    PPMPixel *cuda_pixels;
    hipMalloc ((void **) &cuda_pixels, sizeof(PPMPixel) * image->x * image->y);
    hipMemcpy (cuda_pixels, image->data, sizeof(PPMPixel) * image->x * image->y,
               hipMemcpyHostToDevice);
    PPMImage tmp_image = *image;
    tmp_image.data = cuda_pixels;
    hipMemcpy (cuda_image, &tmp_image, sizeof(PPMImage), hipMemcpyHostToDevice);
t_start = rtclock();
hipLaunchKernelGGL(( Histogram) , dim3(image->x),dim3(image->y), 0, 0, cuda_image, cuda_h);
t_end = rtclock();
hipMemcpy (h, cuda_h, sizeof(float) * 64, hipMemcpyDeviceToHost);
for (i = 0; i < 64; i++){
printf("%0.3f ", h[i] / n);
}
printf("\n");
//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(h);
hipFree (cuda_h);
hipFree (cuda_image);
}
| 0aed965dc4e8b2f2983b4985275525f6e1892f2b.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
__global__ void Histogram(PPMImage *cuda_image, float *cuda_h) {
int j, k, l, x = 0;
int index = threadIdx.x + blockIdx.x * blockDim.x;
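    // One thread per pixel: the triple loop walks the 64 (red, green, blue) bins
    // in order, x is the flattened bin index, and the bin matching this pixel's
    // quantized color is incremented.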
for (j = 0; j <= 3; j++) {
for (k = 0; k <= 3; k++) {
for (l = 0; l <= 3; l++) {
if (cuda_image->data[index].red == j && cuda_image->data[index].green == k && cuda_image->data[index].blue == l) {
                    atomicAdd(&cuda_h[x], 1.0f); // bins are shared across threads, so the increment must be atomic
}
x++;
}
}
}
}
int main(int argc, char *argv[]) {
    if( argc != 2 ) {
        printf("Expected exactly one argument (the input PPM file).\n");
        return 1;
    }
double t_start, t_end;
int i;
char *filename = argv[1]; //Recebendo o arquivo!;
//scanf("%s", filename);
PPMImage *image = readPPM(filename);
PPMImage *cuda_image;
float *h = (float*)malloc(sizeof(float) * 64);
float *cuda_h;
//Inicializar h
for(i=0; i < 64; i++) h[i] = 0.0;
float n = image->y * image->x;
for (i = 0; i < n; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
    cudaMalloc ((void **) &cuda_h, sizeof(float) * 64);
    cudaMalloc ((void **) &cuda_image, sizeof(PPMImage));
    cudaMemcpy (cuda_h, h, sizeof(float) * 64, cudaMemcpyHostToDevice);
    // PPMImage carries a pointer to its pixel buffer, so the pixels have to be
    // copied to the device separately and the device-side struct patched to
    // point at them; copying only the struct would leave data pointing at host
    // memory.
    PPMPixel *cuda_pixels;
    cudaMalloc ((void **) &cuda_pixels, sizeof(PPMPixel) * image->x * image->y);
    cudaMemcpy (cuda_pixels, image->data, sizeof(PPMPixel) * image->x * image->y,
                cudaMemcpyHostToDevice);
    PPMImage tmp_image = *image;
    tmp_image.data = cuda_pixels;
    cudaMemcpy (cuda_image, &tmp_image, sizeof(PPMImage), cudaMemcpyHostToDevice);
t_start = rtclock();
Histogram <<<image->x,image->y>>>(cuda_image, cuda_h);
t_end = rtclock();
cudaMemcpy (h, cuda_h, sizeof(float) * 64, cudaMemcpyDeviceToHost);
for (i = 0; i < 64; i++){
printf("%0.3f ", h[i] / n);
}
printf("\n");
//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(h);
cudaFree (cuda_h);
cudaFree (cuda_image);
}
|
36cc92ecc924a4c4f209e527688ff6e933414100.hip | // !!! This is a file automatically generated by hipify!!!
/* -*- c-basic-offset:2; tab-width:2; indent-tabs-mode:nil -*-
*
* @(#)timing.cu
* @author Karl Ljungkvist <[email protected]>
*
*/
#include <stdio.h>
double timer()
{
hipDeviceSynchronize();
struct timespec ts;
clock_gettime( CLOCK_MONOTONIC_RAW, &ts );
return (double)ts.tv_sec +
(double)ts.tv_nsec / 1000000000.0;
}
| 36cc92ecc924a4c4f209e527688ff6e933414100.cu | /* -*- c-basic-offset:2; tab-width:2; indent-tabs-mode:nil -*-
*
* @(#)timing.cu
* @author Karl Ljungkvist <[email protected]>
*
*/
#include <stdio.h>
double timer()
{
cudaDeviceSynchronize();
struct timespec ts;
clock_gettime( CLOCK_MONOTONIC_RAW, &ts );
return (double)ts.tv_sec +
(double)ts.tv_nsec / 1000000000.0;
}
|
670385a891f54120e4039a317249800a8e3e765d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
* World from GPU! from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, );
CHECK(hipDeviceReset());
return 0;
}
| 670385a891f54120e4039a317249800a8e3e765d.cu | #include "./common.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
* World from GPU! from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
helloFromGPU<<<1, 10>>>();
CHECK(cudaDeviceReset());
return 0;
}
|
9425856776525c6e8d4786673c1d7804a475147b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/nd_index_slice_kernels.h"
#include "oneflow/core/cuda/atomic.cuh"
namespace oneflow {
namespace {
template<typename T, typename I>
__global__ void CudaGatherNd(NdIndexSliceArgs<T, I> args, const I* indices, const T* dense,
T* slices) {
DoGatherNd(args.num_slices * args.slice_size, args.slice_size, args.index_ndims, args.dense_shape,
indices, dense, slices);
}
template<typename T, typename I>
__global__ void CudaScatterNdAdd(NdIndexSliceArgs<T, I> args, const I* indices, const T* slices,
T* dense) {
DoScatterNdAdd<DeviceType::kGPU>(args.num_slices * args.slice_size, args.slice_size,
args.index_ndims, args.dense_shape, indices, slices, dense);
}
template<typename T, typename I>
__global__ void CudaFillByNdIndex(NdIndexSliceArgs<T, I> args, const I* indices, T* dense,
T value) {
DoFillByNdIndex(args.num_slices * args.slice_size, args.slice_size, args.index_ndims,
args.dense_shape, indices, dense, value);
}
} // namespace
template<typename T, typename I>
struct GatherNdFunctor<DeviceType::kGPU, T, I> final {
void operator()(ep::Stream* stream, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* dense, T* slices) const {
RUN_CUDA_KERNEL((CudaGatherNd<T, I>), stream, args.num_slices * args.slice_size, args, indices,
dense, slices);
}
};
template<typename T, typename I>
struct ScatterNdAddFunctor<DeviceType::kGPU, T, I> final {
void operator()(ep::Stream* stream, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* slices, T* dense) const {
RUN_CUDA_KERNEL((CudaScatterNdAdd<T, I>), stream, args.num_slices * args.slice_size, args,
indices, slices, dense);
}
};
template<typename T, typename I>
struct FillByNdIndexFunctor<DeviceType::kGPU, T, I> final {
void operator()(ep::Stream* stream, const NdIndexSliceArgs<T, I>& args, const I* indices,
T* dense, T value) const {
RUN_CUDA_KERNEL((CudaFillByNdIndex<T, I>), stream, args.num_slices * args.slice_size, args,
indices, dense, value);
}
};
template<typename T>
struct DeviceAdd<DeviceType::kGPU, T> {
__device__ __forceinline__ static void Invoke(const T* x, T* y) { cuda::atomic::Add(y, *x); }
};
template<>
struct DeviceAdd<DeviceType::kGPU, uint8_t> {
__device__ __forceinline__ static void Invoke(const uint8_t* x, uint8_t* y) { *y += *x; }
};
template<>
struct DeviceAdd<DeviceType::kGPU, int8_t> {
__device__ __forceinline__ static void Invoke(const int8_t* x, int8_t* y) { *y += *x; }
};
template<>
struct DeviceAdd<DeviceType::kGPU, int64_t> {
__device__ __forceinline__ static void Invoke(const int64_t* x, int64_t* y) { *y += *x; }
};
#define GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ \
FLOATING_DATA_TYPE_SEQ \
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GATHER_ND_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_SCATTER_ND_ADD_FUNCTOR, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_FILL_BY_ND_INDEX_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_GATHER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_LIKE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_UPDATE_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_ADD_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && TORCH_HIP_VERSION >= 10000
template<>
struct DeviceAdd<DeviceType::kGPU, float16> {
__device__ __forceinline__ static void Invoke(const float16* x, float16* y) {
cuda::atomic::Add(reinterpret_cast<half*>(y), *(reinterpret_cast<const half*>(x)));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ND_INDEX_SLICE_FUNCTORS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ND_INDEX_SLICE_KERNELS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#endif
} // namespace oneflow
| 9425856776525c6e8d4786673c1d7804a475147b.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/nd_index_slice_kernels.h"
#include "oneflow/core/cuda/atomic.cuh"
namespace oneflow {
namespace {
template<typename T, typename I>
__global__ void CudaGatherNd(NdIndexSliceArgs<T, I> args, const I* indices, const T* dense,
T* slices) {
DoGatherNd(args.num_slices * args.slice_size, args.slice_size, args.index_ndims, args.dense_shape,
indices, dense, slices);
}
template<typename T, typename I>
__global__ void CudaScatterNdAdd(NdIndexSliceArgs<T, I> args, const I* indices, const T* slices,
T* dense) {
DoScatterNdAdd<DeviceType::kGPU>(args.num_slices * args.slice_size, args.slice_size,
args.index_ndims, args.dense_shape, indices, slices, dense);
}
template<typename T, typename I>
__global__ void CudaFillByNdIndex(NdIndexSliceArgs<T, I> args, const I* indices, T* dense,
T value) {
DoFillByNdIndex(args.num_slices * args.slice_size, args.slice_size, args.index_ndims,
args.dense_shape, indices, dense, value);
}
} // namespace
template<typename T, typename I>
struct GatherNdFunctor<DeviceType::kGPU, T, I> final {
void operator()(ep::Stream* stream, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* dense, T* slices) const {
RUN_CUDA_KERNEL((CudaGatherNd<T, I>), stream, args.num_slices * args.slice_size, args, indices,
dense, slices);
}
};
template<typename T, typename I>
struct ScatterNdAddFunctor<DeviceType::kGPU, T, I> final {
void operator()(ep::Stream* stream, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* slices, T* dense) const {
RUN_CUDA_KERNEL((CudaScatterNdAdd<T, I>), stream, args.num_slices * args.slice_size, args,
indices, slices, dense);
}
};
template<typename T, typename I>
struct FillByNdIndexFunctor<DeviceType::kGPU, T, I> final {
void operator()(ep::Stream* stream, const NdIndexSliceArgs<T, I>& args, const I* indices,
T* dense, T value) const {
RUN_CUDA_KERNEL((CudaFillByNdIndex<T, I>), stream, args.num_slices * args.slice_size, args,
indices, dense, value);
}
};
template<typename T>
struct DeviceAdd<DeviceType::kGPU, T> {
__device__ __forceinline__ static void Invoke(const T* x, T* y) { cuda::atomic::Add(y, *x); }
};
template<>
struct DeviceAdd<DeviceType::kGPU, uint8_t> {
__device__ __forceinline__ static void Invoke(const uint8_t* x, uint8_t* y) { *y += *x; }
};
template<>
struct DeviceAdd<DeviceType::kGPU, int8_t> {
__device__ __forceinline__ static void Invoke(const int8_t* x, int8_t* y) { *y += *x; }
};
template<>
struct DeviceAdd<DeviceType::kGPU, int64_t> {
__device__ __forceinline__ static void Invoke(const int64_t* x, int64_t* y) { *y += *x; }
};
#define GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ \
FLOATING_DATA_TYPE_SEQ \
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GATHER_ND_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_SCATTER_ND_ADD_FUNCTOR, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_FILL_BY_ND_INDEX_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_GATHER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_LIKE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_UPDATE_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_ADD_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && CUDA_VERSION >= 10000
template<>
struct DeviceAdd<DeviceType::kGPU, float16> {
__device__ __forceinline__ static void Invoke(const float16* x, float16* y) {
cuda::atomic::Add(reinterpret_cast<half*>(y), *(reinterpret_cast<const half*>(x)));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ND_INDEX_SLICE_FUNCTORS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ND_INDEX_SLICE_KERNELS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#endif
} // namespace oneflow
|
2f3f912e027ccee3629fc0590f7483cb1e9f4e96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include "../NDArray.h"
#include "../NDArrayFactory.h"
#include "NativeOpExecutioner.h"
#include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h>
#include <ops.h>
#include <ops/gemm.h>
#include <pointercast.h>
#include <stdexcept>
#include <memory>
#include <helpers/logger.h>
#include <loops/pairwise_transform.h>
#include <loops/transform_same.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <helpers/ShapeUtils.h>
#include <sstream>
#include <helpers/ArrayUtils.h>
#include <MmulHelper.h>
#include <helpers/threshold.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <specials_cuda.h>
#include <loops/special_kernels.h>
#include <PointersManager.h>
#include "../NDArray.hpp"
#include <ConstantShapeHelper.h>
namespace nd4j {
void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
Nd4jLong* NDArray::getPlatformShapeInfo() const { return getSpecialShapeInfo(); }
Nd4jLong* NDArray::platformShapeInfo() { return specialShapeInfo(); }
void NDArray::syncToDevice() const {
auto currentDeviceId = AffinityManager::currentDeviceId();
if (currentDeviceId != _deviceId) {
// first of all we update shapeInfo
const_cast<NDArray*>(this)->setShapeInfo(this->getShapeInfo());
// now we actually migrate data buffer
_buffer->migrate();
}
_buffer->syncToSpecial();
}
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const { _buffer->writePrimary(); _buffer->readSpecial(); }
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const { if(!isActualOnHostSide()) syncToHost(); if(!isActualOnDeviceSide()) syncToDevice(); }
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void fillAsTriangularCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const T val, const int lower, const int upper) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int zRank, xRank, areSameOffsets; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// if( (row + upper < col) || (row + lower > col) )
if((coords[zRank - 2] + upper < coords[zRank - 1]) || (coords[zRank - 2] + lower > coords[zRank - 1]))
z[zOffset] = val;
else if(vx != vz) { // when x and z are different arrays
if(xRank != zRank)
coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, NDArray& target, const char direction) {
if (isS())
throw std::runtime_error("NDArray::fillAsTriangular: you can't use this method on String array!");
if(!isSameShape(target) && !(rankOf() == 1 && target.rankOf() == 2 && sizeAt(0) == target.sizeAt(0) && sizeAt(0) == target.sizeAt(1)))
throw std::string("NDArray::fillAsTriangular method: wrong shape of target array !");
if (direction == 'u')
lower = -target.sizeAt(-2);
else if (direction == 'l')
upper = target.sizeAt(-1);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*target.getShapeInfo())) * target.rankOf() + 128;
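    // dynamic shared memory: one rank-sized coordinate scratch array per thread, plus padding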
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({&target}, {this});
hipLaunchKernelGGL(( fillAsTriangularCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), target.getPlatformBuffer(), target.getPlatformShapeInfo(), static_cast<T>(val), lower, upper);
NDArray::registerSpecialUse({&target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void NDArray::fillAsTriangular, (const float val, int lower, int upper, NDArray& target, const char direction), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void identityMatrixCuda(void* vx, const Nd4jLong* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ int rank;
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto offset = shape::getOffset(xShapeInfo, coords);
if(coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val) {
hipLaunchKernelGGL(( identityMatrixCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS())
throw std::runtime_error("NDArray::setIdentity: you can't use this method on String array!");
// if (rankOf() != 2)
// throw std::runtime_error("NDArray::setIdentity: method should work only for 2D tensors. But " + toStringValue(rankOf()) + " was given.");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(getShapeInfo())) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), 1.f), LIBND4J_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
throw std::runtime_error("NDArray::swapUnsage method: both arrays must have the same data type");
if(specialBuffer() == nullptr || other.specialBuffer() == nullptr)
throw std::runtime_error("NDArray::swapUnsafe method: input array should not be empty!");
if(lengthOf() != other.lengthOf())
throw std::runtime_error("NDArray::swapUnsafe method: input arrays should have the same length!");
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe, (specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(), getContext()->getCudaStream()), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = hipStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0)
throw std::runtime_error(msg + std::string(": synchronization failed !"));
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables)
a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables)
a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
hipMemcpy(getSpecialShapeInfo(), getShapeInfo(), shape::shapeInfoByteLength(getShapeInfo()), hipMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void* NDArray::specialBufferWithOffset(Nd4jLong offset) const {
return getSpecialBuffer() != nullptr ? static_cast<int8_t*>(getSpecialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
NDArray NDArray::tile(const std::vector<Nd4jLong>& reps) const {
int dim = reps.size();
Nd4jLong product = 1;
for(const auto& item : reps)
product *= item;
if(product < 1)
throw std::runtime_error("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if(product==1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if(diff < 0) { // reshape to higher dimension
std::vector<Nd4jLong> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.getShapeInfo()+1, rankOld * sizeof(Nd4jLong)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
// create new buffer; in any case the memory amount the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(), dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
NDArray result(newBuff, ShapeDescriptor(newShapeInfo), getContext());
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() happens automatically by means of applying getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH, (this->getSpecialBuffer(), this->getSpecialShapeInfo(), result.getSpecialBuffer(), result.getSpecialShapeInfo(), resultLen, stream), LIBND4J_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
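// Illustrative sketch (hypothetical shapes; factory call assumed from
// NDArrayFactory): tile() broadly follows numpy.tile semantics, e.g. a
// 'c'-ordered {1, 3} row vector tiled with reps = {2, 1} becomes a {2, 3}
// array whose rows are copies of the input:
//
//   auto row = NDArrayFactory::create<float>('c', {1, 3}, {1.f, 2.f, 3.f});
//   auto tiled = row.tile({2, 1});    // shape {2, 3}: 1 2 3 / 1 2 3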
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<Nd4jLong>& reps, NDArray& target) const {
auto repProd = shape::prodLong(reps.data(), reps.size());
if (repProd < 1)
throw std::runtime_error("NDArray::tile: reps can't contain 0s");
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if(!shape::equalsSoft(newShapeInfo, target.getShapeInfo())) {
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() happens automatically by means of applying getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if(rankOf() > target.rankOf())
throw std::runtime_error("NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if(!ShapeUtils::areShapesBroadcastable(*this, target))
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() happens automatically by means of applying getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__global__ static void repeatCuda(const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ int rank;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(zShapeInfo); // xRank = zRank
zLen = shape::length(zShapeInfo); // xLen <= zLen
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if(repSize > 1) {
for (uint j = 0; j < repSize; ++j) {
coords[axis] -= repeats[j];
if (coords[axis] < 0) {
coords[axis] = j;
break;
}
}
}
else
coords[axis] /= repeats[0];
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
}
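// Worked example of the coordinate mapping above (hypothetical values): with
// repeats = {2, 3} along `axis`, output coordinates 0,1 map to input 0 and
// 2,3,4 map to input 1. E.g. for output coordinate 3: 3 - 2 = 1 (>= 0, keep
// going), 1 - 3 = -2 (< 0, stop) => source coordinate j = 1.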
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
static void repeatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
hipLaunchKernelGGL(( repeatCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, repeats, repSize, axis);
}
BUILD_DOUBLE_TEMPLATE(template void repeatCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int* repeats, const int repSize, const int axis), LIBND4J_TYPES, LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by repeats
NDArray NDArray::repeat(const int axis, const std::vector<int>& repeats) const {
NDArray output('c', ShapeUtils::evalRepeatShape(axis, repeats, *this), dataType(), getContext());
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({&output}, {this});
BUILD_SINGLE_SELECTOR_TWICE(dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES);
registerSpecialUse({&output}, {this});
manager.synchronize();
return output;
}
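// Illustrative usage sketch (hypothetical values; factory call assumed from
// NDArrayFactory): repeating every element twice along axis 0.
//
//   auto v = NDArrayFactory::create<float>('c', {3}, {1.f, 2.f, 3.f});
//   auto r = v.repeat(0, {2});        // shape {6}: 1 1 2 2 3 3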
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by repeats
void NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) const {
if(!target.isSameShape(ShapeUtils::evalRepeatShape(axis, repeats, *this)))
throw std::invalid_argument("NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) method: wrong shape of target array!");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = target.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(dataType(), target.dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void* NDArray::getSpecialBuffer() const {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if(_length == 0)
{ printf("NDArray::printActualBuffer: array length is zero !\n"); return; }
if(msg)
printf("%s", msg);
if(host) {
if(getBuffer() == nullptr || _length == 0)
{ printf("NDArray::printActualBuffer: host buffer is nullptr !\n"); return; }
const T* buff = bufferAsT<T>();
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
}
else {
if(getSpecialBuffer() == nullptr || _length == 0)
{ printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n"); return; }
void* pHost = operator new(sizeof(T) * _length);
if (ews() != 1) {
for (uint i = 0; i < _length; i++)
hipMemcpyAsync(reinterpret_cast<T*>(pHost) + i, specialBufferWithOffset(i), sizeof(T), hipMemcpyDeviceToHost, *(getContext()->getCudaStream()));
}
else
hipMemcpyAsync(pHost, getSpecialBuffer(), sizeOfT() * _length, hipMemcpyDeviceToHost, *getContext()->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*getContext()->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("NDArray::printSpecialBuffer: hipStreamSynchronize failed!");
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host,const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
#if defined(__HIPCC__) && !defined(BUILD_TESTS)
//#include <cpu/NDArrayLambda.hpp>
#endif
} // end namespace nd4j
#endif
| 2f3f912e027ccee3629fc0590f7483cb1e9f4e96.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include "../NDArray.h"
#include "../NDArrayFactory.h"
#include "NativeOpExecutioner.h"
#include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h>
#include <ops.h>
#include <ops/gemm.h>
#include <pointercast.h>
#include <stdexcept>
#include <memory>
#include <helpers/logger.h>
#include <loops/pairwise_transform.h>
#include <loops/transform_same.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <helpers/ShapeUtils.h>
#include <sstream>
#include <helpers/ArrayUtils.h>
#include <MmulHelper.h>
#include <helpers/threshold.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <specials_cuda.h>
#include <loops/special_kernels.h>
#include <PointersManager.h>
#include "../NDArray.hpp"
#include <ConstantShapeHelper.h>
namespace nd4j {
void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
Nd4jLong* NDArray::getPlatformShapeInfo() const { return getSpecialShapeInfo(); }
Nd4jLong* NDArray::platformShapeInfo() { return specialShapeInfo(); }
void NDArray::syncToDevice() const {
auto currentDeviceId = AffinityManager::currentDeviceId();
if (currentDeviceId != _deviceId) {
// first of all we update shapeInfo
const_cast<NDArray*>(this)->setShapeInfo(this->getShapeInfo());
// now we actually migrate data buffer
_buffer->migrate();
}
_buffer->syncToSpecial();
}
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const { _buffer->writePrimary(); _buffer->readSpecial(); }
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const { if(!isActualOnHostSide()) syncToHost(); if(!isActualOnDeviceSide()) syncToDevice(); }
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void fillAsTriangularCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const T val, const int lower, const int upper) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int zRank, xRank, areSameOffsets; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// if( (row + upper < col) || (row + lower > col) )
if((coords[zRank - 2] + upper < coords[zRank - 1]) || (coords[zRank - 2] + lower > coords[zRank - 1]))
z[zOffset] = val;
else if(vx != vz) { // when x and z are different arrays
if(xRank != zRank)
coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, NDArray& target, const char direction) {
if (isS())
throw std::runtime_error("NDArray::fillAsTriangular: you can't use this method on String array!");
if(!isSameShape(target) && !(rankOf() == 1 && target.rankOf() == 2 && sizeAt(0) == target.sizeAt(0) && sizeAt(0) == target.sizeAt(1)))
throw std::string("NDArray::fillAsTriangular method: wrong shape of target array !");
if (direction == 'u')
lower = -target.sizeAt(-2);
else if (direction == 'l')
upper = target.sizeAt(-1);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*target.getShapeInfo())) * target.rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({&target}, {this});
fillAsTriangularCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *getContext()->getCudaStream()>>>(getPlatformBuffer(), getPlatformShapeInfo(), target.getPlatformBuffer(), target.getPlatformShapeInfo(), static_cast<T>(val), lower, upper);
NDArray::registerSpecialUse({&target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void NDArray::fillAsTriangular, (const float val, int lower, int upper, NDArray& target, const char direction), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void identityMatrixCuda(void* vx, const Nd4jLong* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ int rank;
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto offset = shape::getOffset(xShapeInfo, coords);
if(coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val) {
identityMatrixCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS())
throw std::runtime_error("NDArray::setIdentity: you can't use this method on String array!");
// if (rankOf() != 2)
// throw std::runtime_error("NDArray::setIdentity: method should work only for 2D tensors. But " + toStringValue(rankOf()) + " was given.");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(getShapeInfo())) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), 1.f), LIBND4J_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
throw std::runtime_error("NDArray::swapUnsage method: both arrays must have the same data type");
if(specialBuffer() == nullptr || other.specialBuffer() == nullptr)
throw std::runtime_error("NDArray::swapUnsafe method: input array should not be empty!");
if(lengthOf() != other.lengthOf())
throw std::runtime_error("NDArray::swapUnsafe method: input arrays should have the same length!");
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe, (specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(), getContext()->getCudaStream()), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = cudaStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0)
throw std::runtime_error(msg + std::string(": synchronization failed !"));
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables)
a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables)
a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
cudaMemcpy(getSpecialShapeInfo(), getShapeInfo(), shape::shapeInfoByteLength(getShapeInfo()), cudaMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void* NDArray::specialBufferWithOffset(Nd4jLong offset) const {
return getSpecialBuffer() != nullptr ? static_cast<int8_t*>(getSpecialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
NDArray NDArray::tile(const std::vector<Nd4jLong>& reps) const {
int dim = reps.size();
Nd4jLong product = 1;
for(const auto& item : reps)
product *= item;
if(product < 1)
throw std::runtime_error("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if(product==1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if(diff < 0) { // reshape to higher dimension
std::vector<Nd4jLong> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.getShapeInfo()+1, rankOld * sizeof(Nd4jLong)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
// create new buffer; in any case the memory amount the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(), dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
NDArray result(newBuff, ShapeDescriptor(newShapeInfo), getContext());
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() happens automatically by means of applying getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH, (this->getSpecialBuffer(), this->getSpecialShapeInfo(), result.getSpecialBuffer(), result.getSpecialShapeInfo(), resultLen, stream), LIBND4J_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<Nd4jLong>& reps, NDArray& target) const {
auto repProd = shape::prodLong(reps.data(), reps.size());
if (repProd < 1)
throw std::runtime_error("NDArray::tile: reps can't contain 0s");
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if(!shape::equalsSoft(newShapeInfo, target.getShapeInfo())) {
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() happens automatically by means of applying getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if(rankOf() > target.rankOf())
throw std::runtime_error("NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if(!ShapeUtils::areShapesBroadcastable(*this, target))
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill newBuff, loop through all elements of newBuff
// looping through getBuffer() happens automatically by means of applying getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__global__ static void repeatCuda(const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ int rank;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(zShapeInfo); // xRank = zRank
zLen = shape::length(zShapeInfo); // xLen <= zLen
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if(repSize > 1) {
for (uint j = 0; j < repSize; ++j) {
coords[axis] -= repeats[j];
if (coords[axis] < 0) {
coords[axis] = j;
break;
}
}
}
else
coords[axis] /= repeats[0];
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
static void repeatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
repeatCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, repeats, repSize, axis);
}
BUILD_DOUBLE_TEMPLATE(template void repeatCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int* repeats, const int repSize, const int axis), LIBND4J_TYPES, LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by repeats
NDArray NDArray::repeat(const int axis, const std::vector<int>& repeats) const {
NDArray output('c', ShapeUtils::evalRepeatShape(axis, repeats, *this), dataType(), getContext());
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({&output}, {this});
BUILD_SINGLE_SELECTOR_TWICE(dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES);
registerSpecialUse({&output}, {this});
manager.synchronize();
return output;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by repeats
void NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) const {
if(!target.isSameShape(ShapeUtils::evalRepeatShape(axis, repeats, *this)))
throw std::invalid_argument("NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) method: wrong shape of target array!");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = target.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(dataType(), target.dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void* NDArray::getSpecialBuffer() const {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if(_length == 0)
{ printf("NDArray::printActualBuffer: array length is zero !\n"); return; }
if(msg)
printf("%s", msg);
if(host) {
if(getBuffer() == nullptr || _length == 0)
{ printf("NDArray::printActualBuffer: host buffer is nullptr !\n"); return; }
const T* buff = bufferAsT<T>();
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
}
else {
if(getSpecialBuffer() == nullptr || _length == 0)
{ printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n"); return; }
void* pHost = operator new(sizeof(T) * _length);
if (ews() != 1) {
for (uint i = 0; i < _length; i++)
cudaMemcpyAsync(reinterpret_cast<T*>(pHost) + i, specialBufferWithOffset(i), sizeof(T), cudaMemcpyDeviceToHost, *(getContext()->getCudaStream()));
}
else
cudaMemcpyAsync(pHost, getSpecialBuffer(), sizeOfT() * _length, cudaMemcpyDeviceToHost, *getContext()->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*getContext()->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("NDArray::printSpecialBuffer: cudaStreamSynchronize failed!");
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host,const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
#if defined(__CUDACC__) && !defined(BUILD_TESTS)
//#include <cpu/NDArrayLambda.hpp>
#endif
} // end namespace nd4j
#endif
|
134a3529e278e87be2c5d5d254a23cabfff17a7e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <memory>
#include <mutex> // NOLINT
#include "ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h"
#include "c10/hip/HIPFunctions.h"
#include "k2/csrc/context.h"
#include "k2/csrc/log.h"
#include "k2/csrc/pytorch_context.h"
namespace k2 {
static std::once_flag has_cuda_init_flag;
static bool has_cuda = false;
static void InitHasCuda() {
if (torch::cuda::is_available())
has_cuda = true;
else
K2_LOG(WARNING) << "CUDA is not available. Return a CPU context.";
}
class PytorchCpuContext : public Context {
public:
PytorchCpuContext() {
allocator_ = torch::GetAllocator(torch::kCPU);
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCpu; }
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCpu;
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu:
memcpy(dst, src, num_bytes);
break;
case kCuda: {
ContextPtr pinned_context = GetPinnedContext();
auto region = NewRegion(pinned_context, num_bytes);
memcpy(region->data, src, num_bytes);
pinned_context->CopyDataTo(num_bytes, region->data, dst_context, dst);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
};
class PytorchCudaContext : public Context {
public:
explicit PytorchCudaContext(int32_t gpu_id) : gpu_id_(gpu_id) {
K2_CHECK_GE(gpu_id, 0);
K2_CHECK_LT(gpu_id, c10::hip::device_count());
c10::hip::set_device(gpu_id);
// The internals of `lazyInitCUDA` are executed only once
// so it is fine to invoke lazyInitCUDA() multiple times.
// The call will be inlined since it is defined in the header
// aten/src/ATen/Context.h
at::globalContext().lazyInitCUDA();
allocator_ = c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCuda; }
int32_t GetDeviceId() const override { return gpu_id_; }
hipStream_t GetCudaStream() const override {
return g_stream_override.OverrideStream(
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(gpu_id_));
}
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_;
}
void Sync() const override {
auto ret = hipStreamSynchronize(GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu: {
hipError_t ret =
hipMemcpy(dst, src, num_bytes, hipMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret);
break;
}
case kCuda: {
hipError_t ret =
hipMemcpyAsync(dst, src, num_bytes, hipMemcpyDeviceToDevice,
dst_context->GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
int32_t gpu_id_;
};
ContextPtr GetCpuContext() { return std::make_shared<PytorchCpuContext>(); }
ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) {
std::call_once(has_cuda_init_flag, InitHasCuda);
if (has_cuda) {
if (gpu_id < 0) gpu_id = c10::hip::current_device();
return std::make_shared<PytorchCudaContext>(gpu_id);
}
return GetCpuContext();
}
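// Illustrative usage sketch (hypothetical size): callers do not have to probe
// for CUDA themselves, the factory silently falls back to the CPU context.
//
//   ContextPtr ctx = GetCudaContext();         // default device, or CPU fallback
//   RegionPtr region = NewRegion(ctx, 1024);   // 1 KiB region on that device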
RegionPtr NewRegion(torch::Tensor tensor) {
auto ans = std::make_shared<Region>();
if (tensor.device().type() == torch::kCPU) {
ans->context = GetCpuContext();
} else if (tensor.is_cuda()) {
ans->context = GetCudaContext(tensor.device().index());
} else {
K2_LOG(FATAL) << "Unsupported device: " << tensor.device()
<< "\nOnly CPU and CUDA are supported";
}
// NOTE: the tensor is passed from Python and we have
// to retain it to avoid potential segmentation fault.
//
// It will be freed in `Context::Deallocate`.
auto *managed_tensor = new ManagedTensor(tensor);
ans->data = tensor.data_ptr();
ans->deleter_context = managed_tensor;
#if K2_TORCH_VERSION_MAJOR > 1 || \
(K2_TORCH_VERSION_MAJOR == 1 && K2_TORCH_VERSION_MINOR > 5)
// nbytes() is available only for torch > 1.5
// see https://github.com/pytorch/pytorch/pull/37028
ans->num_bytes = tensor.storage().nbytes();
#else
// capacity() is available only for torch <= 1.5.0
ans->num_bytes = tensor.storage().capacity();
#endif
ans->bytes_used = ans->num_bytes;
return ans;
}
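// Illustrative sketch (hypothetical tensor): wrapping an existing torch tensor
// so that k2 reuses its storage instead of copying it; the ManagedTensor above
// keeps the tensor alive for the lifetime of the region.
//
//   torch::Tensor t = torch::zeros({4}, torch::kFloat);
//   RegionPtr region = NewRegion(t);           // region->data aliases t.data_ptr()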
} // namespace k2
| 134a3529e278e87be2c5d5d254a23cabfff17a7e.cu | /**
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <memory>
#include <mutex> // NOLINT
#include "c10/cuda/CUDACachingAllocator.h"
#include "c10/cuda/CUDAFunctions.h"
#include "k2/csrc/context.h"
#include "k2/csrc/log.h"
#include "k2/csrc/pytorch_context.h"
namespace k2 {
static std::once_flag has_cuda_init_flag;
static bool has_cuda = false;
static void InitHasCuda() {
if (torch::cuda::is_available())
has_cuda = true;
else
K2_LOG(WARNING) << "CUDA is not available. Return a CPU context.";
}
class PytorchCpuContext : public Context {
public:
PytorchCpuContext() {
allocator_ = torch::GetAllocator(torch::kCPU);
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCpu; }
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCpu;
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu:
memcpy(dst, src, num_bytes);
break;
case kCuda: {
ContextPtr pinned_context = GetPinnedContext();
auto region = NewRegion(pinned_context, num_bytes);
memcpy(region->data, src, num_bytes);
pinned_context->CopyDataTo(num_bytes, region->data, dst_context, dst);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
};
class PytorchCudaContext : public Context {
public:
explicit PytorchCudaContext(int32_t gpu_id) : gpu_id_(gpu_id) {
K2_CHECK_GE(gpu_id, 0);
K2_CHECK_LT(gpu_id, c10::cuda::device_count());
c10::cuda::set_device(gpu_id);
// The internals of `lazyInitCUDA` are executed only once
// so it is fine to invoke lazyInitCUDA() multiple times.
// The call will be inlined since it is defined in the header
// aten/src/ATen/Context.h
at::globalContext().lazyInitCUDA();
allocator_ = c10::cuda::CUDACachingAllocator::get();
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCuda; }
int32_t GetDeviceId() const override { return gpu_id_; }
cudaStream_t GetCudaStream() const override {
return g_stream_override.OverrideStream(
c10::cuda::getCurrentCUDAStream(gpu_id_));
}
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_;
}
void Sync() const override {
auto ret = cudaStreamSynchronize(GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu: {
cudaError_t ret =
cudaMemcpy(dst, src, num_bytes, cudaMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret);
break;
}
case kCuda: {
cudaError_t ret =
cudaMemcpyAsync(dst, src, num_bytes, cudaMemcpyDeviceToDevice,
dst_context->GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
int32_t gpu_id_;
};
ContextPtr GetCpuContext() { return std::make_shared<PytorchCpuContext>(); }
ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) {
std::call_once(has_cuda_init_flag, InitHasCuda);
if (has_cuda) {
if (gpu_id < 0) gpu_id = c10::cuda::current_device();
return std::make_shared<PytorchCudaContext>(gpu_id);
}
return GetCpuContext();
}
RegionPtr NewRegion(torch::Tensor tensor) {
auto ans = std::make_shared<Region>();
if (tensor.device().type() == torch::kCPU) {
ans->context = GetCpuContext();
} else if (tensor.is_cuda()) {
ans->context = GetCudaContext(tensor.device().index());
} else {
K2_LOG(FATAL) << "Unsupported device: " << tensor.device()
<< "\nOnly CPU and CUDA are supported";
}
// NOTE: the tensor is passed from Python and we have
// to retain it to avoid potential segmentation fault.
//
// It will be freed in `Context::Deallocate`.
auto *managed_tensor = new ManagedTensor(tensor);
ans->data = tensor.data_ptr();
ans->deleter_context = managed_tensor;
#if K2_TORCH_VERSION_MAJOR > 1 || \
(K2_TORCH_VERSION_MAJOR == 1 && K2_TORCH_VERSION_MINOR > 5)
// nbytes() is available only for torch > 1.5
// see https://github.com/pytorch/pytorch/pull/37028
ans->num_bytes = tensor.storage().nbytes();
#else
// capacity() is available only for torch <= 1.5.0
ans->num_bytes = tensor.storage().capacity();
#endif
ans->bytes_used = ans->num_bytes;
return ans;
}
} // namespace k2
|
c722cadfe57d71045befd0646808f0496ccc47a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/arg_min_max_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#if defined(__NVCC__) || defined(__HIPCC__)
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <limits>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/phi/core/ddim.h"
namespace phi {
namespace { // NOLINT
template <typename K, typename V>
using KeyValuePair = hipcub::KeyValuePair<K, V>;
} // end namespace
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__);
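// Illustrative expansion sketch: for a launch body BODY, each
// FIXED_BLOCK_DIM_CASE_BASE(k, BODY) above contributes one switch case, e.g. k = 8:
//
//   case (1 << 8): {                  // block size 256
//     constexpr auto kBlockDim = (1 << 8);
//     BODY;
//   } break;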
template <typename T, typename IndType, class Reducer, size_t BlockDim>
__global__ void ArgCUDAKernel(const int64_t height, // n * h
const int64_t width, // c
const int64_t post_size, // h
const Reducer reducer,
const T init,
const T* in,
IndType* out) {
typedef hipcub::BlockReduce<KeyValuePair<int, T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int idx = blockIdx.x; idx < height; idx += gridDim.x) {
KeyValuePair<int, T> kv_pair = {-1, init};
int h = idx / post_size;
int w = idx % post_size;
for (int k = threadIdx.x; k < width; k += blockDim.x) {
kv_pair =
reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair);
}
kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer);
if (threadIdx.x == 0) {
out[idx] = static_cast<IndType>(kv_pair.key);
}
__syncthreads();
}
}
template <typename T, typename IndType, class Reducer>
void ComputeFullArg(const phi::GPUContext& dev_ctx,
const DenseTensor& input,
DenseTensor* indices,
const int64_t pre,
const int64_t post,
const int64_t n) {
auto cu_stream = dev_ctx.stream();
auto ComputeBlockSize = [](int64_t col) {
auto block_size = 8;
if (col > 512)
block_size = 1024;
else if (col > 256)
block_size = 512;
else if (col > 128)
block_size = 256;
else if (col > 64)
block_size = 128;
else if (col > 32)
block_size = 64;
else if (col > 16)
block_size = 32;
else if (col > 8)
block_size = 16;
#ifdef __HIPCC__
block_size = std::min(block_size, 256);
#endif
return block_size;
};
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int64_t height = pre * post;
int64_t width = n;
int64_t grid_size = height < max_grid_dimx ? height : max_grid_dimx;
const T* in_data = input.data<T>();
IndType* out_data = dev_ctx.template Alloc<IndType>(indices);
if (typeid(Reducer) == typeid(hipcub::ArgMax)) {
switch (ComputeBlockSize(width)) {
      FIXED_BLOCK_DIM_CASE(hipLaunchKernelGGL(
          (ArgCUDAKernel<T, IndType, Reducer, kBlockDim>),
          dim3(grid_size), dim3(kBlockDim), 0, cu_stream,
          height,
          width,
          post,
          Reducer(),
          std::numeric_limits<T>::lowest(),
          in_data,
          out_data));
}
} else {
switch (ComputeBlockSize(width)) {
      FIXED_BLOCK_DIM_CASE(hipLaunchKernelGGL(
          (ArgCUDAKernel<T, IndType, Reducer, kBlockDim>),
          dim3(grid_size), dim3(kBlockDim), 0, cu_stream,
          height,
          width,
          post,
          Reducer(),
          std::numeric_limits<T>::max(),
          in_data,
          out_data));
}
}
}
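// Worked sizing example (hypothetical shape): reducing a {8, 1000, 32} input
// over axis 1 gives pre = 8, n = width = 1000, post = 32, hence
// height = pre * post = 256 rows of work; ComputeBlockSize(1000) selects 1024
// threads, clamped to 256 on HIP builds by the block_size clamp above.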
template <typename Context, typename T, class Reducer>
struct VisitDataCudaArgMinMaxFunctor {
const Context& dev_ctx;
const DenseTensor& x;
int64_t axis;
bool keepdims;
bool flatten;
DenseTensor* out;
explicit VisitDataCudaArgMinMaxFunctor(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
DenseTensor* out)
: dev_ctx(dev_ctx),
x(x),
axis(axis),
keepdims(keepdims),
flatten(flatten),
out(out) {}
template <typename IndType>
void apply() const {
phi::DDim x_dims;
int new_axis = axis;
if (flatten) {
x_dims = phi::make_ddim({x.numel()});
// if flatten, the axis just as 0
new_axis = 0;
} else {
x_dims = x.dims();
if (axis < 0) new_axis = axis + x.dims().size();
}
int64_t numel = x.numel();
int64_t groups = numel / x_dims[new_axis];
int64_t pre = 1;
int64_t post = 1;
int64_t n = x_dims[new_axis];
for (int i = 0; i < new_axis; i++) {
pre *= x_dims[i];
}
for (int i = new_axis + 1; i < x_dims.size(); i++) {
post *= x_dims[i];
}
ComputeFullArg<T, IndType, Reducer>(dev_ctx, x, out, pre, post, n);
}
};
template <typename Context, typename T, class Reducer>
void ArgMinMaxOpCUDAKernel(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
if (dtype < 0) {
paddle::framework::VisitDataTypeTiny(
static_cast<paddle::framework::proto::VarType::Type>(
paddle::framework::proto::VarType::INT64),
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis, keepdims, flatten, out));
return;
}
paddle::framework::VisitDataTypeTiny(
static_cast<paddle::framework::proto::VarType::Type>(dtype),
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis, keepdims, flatten, out));
}
template <typename T, typename Context>
void ArgMinKernel(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, hipcub::ArgMin>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
template <typename T, typename Context>
void ArgMaxKernel(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, hipcub::ArgMax>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
#endif
} // namespace phi
PD_REGISTER_KERNEL(arg_min,
GPU,
ALL_LAYOUT,
phi::ArgMinKernel,
phi::dtype::float16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
PD_REGISTER_KERNEL(arg_max,
GPU,
ALL_LAYOUT,
phi::ArgMaxKernel,
phi::dtype::float16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
| c722cadfe57d71045befd0646808f0496ccc47a1.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/arg_min_max_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#if defined(__NVCC__) || defined(__HIPCC__)
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <limits>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/phi/core/ddim.h"
namespace phi {
namespace { // NOLINT
template <typename K, typename V>
using KeyValuePair = cub::KeyValuePair<K, V>;
} // end namespace
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__);
template <typename T, typename IndType, class Reducer, size_t BlockDim>
__global__ void ArgCUDAKernel(const int64_t height, // n * h
const int64_t width, // c
const int64_t post_size, // h
const Reducer reducer,
const T init,
const T* in,
IndType* out) {
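  // Each output position corresponds to one (pre, post) coordinate pair. Threads
  // scan the `width` candidates along the reduced axis, keeping a running best
  // (index, value) pair; a block-wide cub reduction then selects the winner and
  // thread 0 writes its index.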
typedef cub::BlockReduce<KeyValuePair<int, T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int idx = blockIdx.x; idx < height; idx += gridDim.x) {
KeyValuePair<int, T> kv_pair = {-1, init};
int h = idx / post_size;
int w = idx % post_size;
for (int k = threadIdx.x; k < width; k += blockDim.x) {
kv_pair =
reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair);
}
kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer);
if (threadIdx.x == 0) {
out[idx] = static_cast<IndType>(kv_pair.key);
}
__syncthreads();
}
}
template <typename T, typename IndType, class Reducer>
void ComputeFullArg(const phi::GPUContext& dev_ctx,
const DenseTensor& input,
DenseTensor* indices,
const int64_t pre,
const int64_t post,
const int64_t n) {
auto cu_stream = dev_ctx.stream();
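  // Pick the smallest power-of-two block width (at least 8) that covers the length
  // of the reduced axis, capped at 1024 threads (256 when building for HIP).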
auto ComputeBlockSize = [](int64_t col) {
auto block_size = 8;
if (col > 512)
block_size = 1024;
else if (col > 256)
block_size = 512;
else if (col > 128)
block_size = 256;
else if (col > 64)
block_size = 128;
else if (col > 32)
block_size = 64;
else if (col > 16)
block_size = 32;
else if (col > 8)
block_size = 16;
#ifdef __HIPCC__
block_size = std::min(block_size, 256);
#endif
return block_size;
};
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int64_t height = pre * post;
int64_t width = n;
int64_t grid_size = height < max_grid_dimx ? height : max_grid_dimx;
const T* in_data = input.data<T>();
IndType* out_data = dev_ctx.template Alloc<IndType>(indices);
if (typeid(Reducer) == typeid(cub::ArgMax)) {
switch (ComputeBlockSize(width)) {
FIXED_BLOCK_DIM_CASE(ArgCUDAKernel<T, IndType, Reducer, kBlockDim>
<<<grid_size, kBlockDim, 0, cu_stream>>>(
height,
width,
post,
Reducer(),
std::numeric_limits<T>::lowest(),
in_data,
out_data));
}
} else {
switch (ComputeBlockSize(width)) {
FIXED_BLOCK_DIM_CASE(ArgCUDAKernel<T, IndType, Reducer, kBlockDim>
<<<grid_size, kBlockDim, 0, cu_stream>>>(
height,
width,
post,
Reducer(),
std::numeric_limits<T>::max(),
in_data,
out_data));
}
}
}
template <typename Context, typename T, class Reducer>
struct VisitDataCudaArgMinMaxFunctor {
const Context& dev_ctx;
const DenseTensor& x;
int64_t axis;
bool keepdims;
bool flatten;
DenseTensor* out;
explicit VisitDataCudaArgMinMaxFunctor(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
DenseTensor* out)
: dev_ctx(dev_ctx),
x(x),
axis(axis),
keepdims(keepdims),
flatten(flatten),
out(out) {}
template <typename IndType>
void apply() const {
phi::DDim x_dims;
int new_axis = axis;
if (flatten) {
x_dims = phi::make_ddim({x.numel()});
// if flatten, the axis just as 0
new_axis = 0;
} else {
x_dims = x.dims();
if (axis < 0) new_axis = axis + x.dims().size();
}
int64_t numel = x.numel();
int64_t groups = numel / x_dims[new_axis];
int64_t pre = 1;
int64_t post = 1;
int64_t n = x_dims[new_axis];
for (int i = 0; i < new_axis; i++) {
pre *= x_dims[i];
}
for (int i = new_axis + 1; i < x_dims.size(); i++) {
post *= x_dims[i];
}
ComputeFullArg<T, IndType, Reducer>(dev_ctx, x, out, pre, post, n);
}
};
template <typename Context, typename T, class Reducer>
void ArgMinMaxOpCUDAKernel(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
if (dtype < 0) {
paddle::framework::VisitDataTypeTiny(
static_cast<paddle::framework::proto::VarType::Type>(
paddle::framework::proto::VarType::INT64),
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis, keepdims, flatten, out));
return;
}
paddle::framework::VisitDataTypeTiny(
static_cast<paddle::framework::proto::VarType::Type>(dtype),
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis, keepdims, flatten, out));
}
template <typename T, typename Context>
void ArgMinKernel(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, cub::ArgMin>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
template <typename T, typename Context>
void ArgMaxKernel(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, cub::ArgMax>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
#endif
} // namespace phi
PD_REGISTER_KERNEL(arg_min,
GPU,
ALL_LAYOUT,
phi::ArgMinKernel,
phi::dtype::float16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
PD_REGISTER_KERNEL(arg_max,
GPU,
ALL_LAYOUT,
phi::ArgMaxKernel,
phi::dtype::float16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
|
4cd31b4ab9ae998051a87020a319a115487fefb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#define MAXLINESIZE 100
__device__ int dNUM;
__device__ int dBLOCK_N;
__device__ int dTHREAD_N;
__device__ float dx1;
__device__ float dy1;
__device__ float dx2;
__device__ float dy2;
__device__ float dx3;
__device__ float dy3;
// NUmber of threads = 96, Number of SM = 2, Number of cores per SM = 48
__global__
void calculate_triliteration(float *dda, float *ddb, float *ddc, float *dx, float *dy){
extern __shared__ float temp[];
float *tempX = temp;
float *tempY = (float *)&tempX[dTHREAD_N];
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int idx = blockId * dTHREAD_N + threadId;
int totalThreads = dTHREAD_N * dBLOCK_N;
float dx1dx2 = pow(dx1, 2) - pow(dx2, 2);
float dy1dy2 = pow(dy1, 2) - pow(dy2, 2);
float dx1dx3 = pow(dx1, 2) - pow(dx3, 2);
float dy1dy3 = pow(dy1, 2) - pow(dy3, 2);
float dy2dy1 = 2 * (dy2 - dy1);
float dy3dy1 = 2 * (dy3 - dy1);
float dx2dx1 = 2 * (dx2 - dx1);
float dx3dx1 = 2 * (dx3 - dx1);
float denX = dx2dx1 * dy3dy1 - dx3dx1 * dy2dy1;
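// The terms above depend only on the three anchor positions, so they are computed
// once per thread. denX is proportional to the cross product of the anchor
// baselines and vanishes when the anchors are collinear, in which case no unique
// position exists.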
for(int i = idx; i < dNUM; i += totalThreads){
if(denX == 0){
if(i % 4 == 0){
dx[i/4] = 0;
dy[i/4] = 0;
}
return;
}
float ddaddb = pow(dda[i], 2) - pow(ddb[i], 2);
float ddaddc = pow(dda[i], 2) - pow(ddc[i], 2);
float numX = (ddaddb - dx1dx2 - dy1dy2) * dy3dy1 - (ddaddc - dx1dx3 - dy1dy3) * dy2dy1;
float numY = (ddaddc - dx1dx3 - dy1dy3) * dx2dx1 - (ddaddb - dx1dx2 - dy1dy2) * dx3dx1;
tempX[threadId] = numX / denX;
tempY[threadId] = numY / denX;
/* printf("The output is => %f %f\n", tempX[threadId], tempY[threadId]);*/
__syncthreads();
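// Every group of four consecutive measurements is collapsed into one output
// position by averaging the four per-thread estimates staged in shared memory.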
if(i % 4 == 0){
dx[i/4] = (tempX[threadId] + tempX[threadId + 1] + tempX[threadId + 2] + tempX[threadId + 3] ) / 4;
dy[i/4] = (tempY[threadId] + tempY[threadId + 1] + tempY[threadId + 2] + tempY[threadId + 3] ) / 4;
}
__syncthreads();
}
}
int main(int args, char ** argv){
clock_t begin = clock();
if(args != 6){
printf("Invalid Arguments\nUsage: ./triliteration <NUM> <BLOCK_N> <THREAD_N> <INPUT_FILE> <CHECK_FILE>\n");
return -1;
}
int NUM = pow(2, atoi(argv[1]));
int BLOCK_N = atoi(argv[2]);
int THREAD_N = atoi(argv[3]);
char *INPUT_FILE = argv[4];
char *CHECK_FILE = argv[5];
printf("\n\tInput Size : %d\n", NUM);
printf("\tBlock_N : %d\n", BLOCK_N);
printf("\tTHREAD_N : %d\n", THREAD_N);
FILE * input_fd, * check_fd;
input_fd = fopen(INPUT_FILE, "r");
char line[MAXLINESIZE];
int line_count = 0;
float da[NUM],db[NUM],dc[NUM], x[NUM / 4], y[NUM / 4];
float x1 = 0, y1 = 0, x2 = 0, y2 = 0, x3 = 0, y3 = 0;
if(fgets(line, MAXLINESIZE, input_fd) != NULL){
sscanf(line, "%f %f %f %f %f %f\n", &x1, &y1, &x2, &y2, &x3, &y3);
}
/*x1 = 0.0;
y1 = 0.0;
x2 = 20000.0;
y2 = 20000.0;
x3 = 40000.0;
y3 = 0.0;*/
printf("\nThe Guard points:\n\t%f, %f \n\t%f, %f\n\t%f, %f\n", x1, y1, x2, y2, x3, y3);
while ( fgets(line, MAXLINESIZE, input_fd) != NULL && line_count < NUM) {
da[line_count] = 0.0;
db[line_count] = 0.0;
dc[line_count] = 0.0;
/*printf("The line is %s\n",line );*/
sscanf(line, "%f %f %f\n", &da[line_count], &db[line_count], &dc[line_count]);
/*printf("the values are => %f %f %f\n", da[line_count], db[line_count], dc[line_count]);*/
//da[line_count] = 3.0; db[line_count] = 3.0; dc[line_count] = 3.0;
line_count ++;
}
float * dda, * ddb, * ddc;
float * dx, * dy;
int _floatSize = NUM * sizeof(float);
hipMalloc( (void**)&dda, _floatSize);
hipMalloc( (void**)&ddb, _floatSize);
hipMalloc( (void**)&ddc, _floatSize);
hipMalloc( (void**)&dx, _floatSize / 4);
hipMalloc( (void**)&dy, _floatSize / 4);
hipMemcpyToSymbol(dNUM, &NUM, sizeof(int));
hipMemcpyToSymbol(dBLOCK_N, &BLOCK_N, sizeof(int));
hipMemcpyToSymbol(dTHREAD_N, &THREAD_N, sizeof(int));
hipMemcpy( dda, da, _floatSize, hipMemcpyHostToDevice );
hipMemcpy( ddb, db, _floatSize, hipMemcpyHostToDevice );
hipMemcpy( ddc, dc, _floatSize, hipMemcpyHostToDevice );
hipMemcpyToSymbol( dx1, &x1, sizeof(float));
hipMemcpyToSymbol( dy1, &y1, sizeof(float));
hipMemcpyToSymbol( dx2, &x2, sizeof(float));
hipMemcpyToSymbol( dy2, &y2, sizeof(float));
hipMemcpyToSymbol( dx3, &x3, sizeof(float));
hipMemcpyToSymbol( dy3, &y3, sizeof(float));
hipLaunchKernelGGL(( calculate_triliteration), dim3(BLOCK_N), dim3(THREAD_N), 2 * THREAD_N * sizeof(float), 0, dda, ddb, ddc, dx, dy);
hipMemcpy( x, dx, _floatSize / 4, hipMemcpyDeviceToHost);
hipMemcpy( y, dy, _floatSize / 4, hipMemcpyDeviceToHost);
printf("Legend : \n\t. => difference less than 0.1,\n\tX => difference greater than 0.1 \n");
check_fd = fopen(CHECK_FILE, "r");
float avgX = 0.0, avgY= 0.0;
//printf("Original X and Y\t Calculated X and Y\n");
for(int i = 0; i < NUM / 4; i ++){
fgets(line, MAXLINESIZE, check_fd);
sscanf(line, "%f %f", &avgX, &avgY);
if(abs(x[i] - avgX) <= 0.1 && abs(y[i] - avgY) <= 0.1){
printf(".");
}else{
printf("X");
}
//printf("%f, %f\t%f, %f\n", x[i], y[i], avgX, avgY);
}
hipFree(dda);
hipFree(ddb);
hipFree(ddc);
hipFree(dx);
hipFree(dy);
fclose(input_fd);
fclose(check_fd);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nExecution time is %f\n", time_spent);
return 1;
} | 4cd31b4ab9ae998051a87020a319a115487fefb7.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#define MAXLINESIZE 100
__device__ int dNUM;
__device__ int dBLOCK_N;
__device__ int dTHREAD_N;
__device__ float dx1;
__device__ float dy1;
__device__ float dx2;
__device__ float dy2;
__device__ float dx3;
__device__ float dy3;
// NUmber of threads = 96, Number of SM = 2, Number of cores per SM = 48
__global__
void calculate_triliteration(float *dda, float *ddb, float *ddc, float *dx, float *dy){
extern __shared__ float temp[];
float *tempX = temp;
float *tempY = (float *)&tempX[dTHREAD_N];
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int idx = blockId * dTHREAD_N + threadId;
int totalThreads = dTHREAD_N * dBLOCK_N;
float dx1dx2 = pow(dx1, 2) - pow(dx2, 2);
float dy1dy2 = pow(dy1, 2) - pow(dy2, 2);
float dx1dx3 = pow(dx1, 2) - pow(dx3, 2);
float dy1dy3 = pow(dy1, 2) - pow(dy3, 2);
float dy2dy1 = 2 * (dy2 - dy1);
float dy3dy1 = 2 * (dy3 - dy1);
float dx2dx1 = 2 * (dx2 - dx1);
float dx3dx1 = 2 * (dx3 - dx1);
float denX = dx2dx1 * dy3dy1 - dx3dx1 * dy2dy1;
for(int i = idx; i < dNUM; i += totalThreads){
if(denX == 0){
if(i % 4 == 0){
dx[i/4] = 0;
dy[i/4] = 0;
}
return;
}
float ddaddb = pow(dda[i], 2) - pow(ddb[i], 2);
float ddaddc = pow(dda[i], 2) - pow(ddc[i], 2);
float numX = (ddaddb - dx1dx2 - dy1dy2) * dy3dy1 - (ddaddc - dx1dx3 - dy1dy3) * dy2dy1;
float numY = (ddaddc - dx1dx3 - dy1dy3) * dx2dx1 - (ddaddb - dx1dx2 - dy1dy2) * dx3dx1;
tempX[threadId] = numX / denX;
tempY[threadId] = numY / denX;
/* printf("The output is => %f %f\n", tempX[threadId], tempY[threadId]);*/
__syncthreads();
if(i % 4 == 0){
dx[i/4] = (tempX[threadId] + tempX[threadId + 1] + tempX[threadId + 2] + tempX[threadId + 3] ) / 4;
dy[i/4] = (tempY[threadId] + tempY[threadId + 1] + tempY[threadId + 2] + tempY[threadId + 3] ) / 4;
}
__syncthreads();
}
}
int main(int args, char ** argv){
clock_t begin = clock();
if(args != 6){
printf("Invalid Arguments\nUsage: ./triliteration <NUM> <BLOCK_N> <THREAD_N> <INPUT_FILE> <CHECK_FILE>\n");
return -1;
}
int NUM = pow(2, atoi(argv[1]));
int BLOCK_N = atoi(argv[2]);
int THREAD_N = atoi(argv[3]);
char *INPUT_FILE = argv[4];
char *CHECK_FILE = argv[5];
printf("\n\tInput Size : %d\n", NUM);
printf("\tBlock_N : %d\n", BLOCK_N);
printf("\tTHREAD_N : %d\n", THREAD_N);
FILE * input_fd, * check_fd;
input_fd = fopen(INPUT_FILE, "r");
char line[MAXLINESIZE];
int line_count = 0;
float da[NUM],db[NUM],dc[NUM], x[NUM / 4], y[NUM / 4];
float x1 = 0, y1 = 0, x2 = 0, y2 = 0, x3 = 0, y3 = 0;
if(fgets(line, MAXLINESIZE, input_fd) != NULL){
sscanf(line, "%f %f %f %f %f %f\n", &x1, &y1, &x2, &y2, &x3, &y3);
}
/*x1 = 0.0;
y1 = 0.0;
x2 = 20000.0;
y2 = 20000.0;
x3 = 40000.0;
y3 = 0.0;*/
printf("\nThe Guard points:\n\t%f, %f \n\t%f, %f\n\t%f, %f\n", x1, y1, x2, y2, x3, y3);
while ( fgets(line, MAXLINESIZE, input_fd) != NULL && line_count < NUM) {
da[line_count] = 0.0;
db[line_count] = 0.0;
dc[line_count] = 0.0;
/*printf("The line is %s\n",line );*/
sscanf(line, "%f %f %f\n", &da[line_count], &db[line_count], &dc[line_count]);
/*printf("the values are => %f %f %f\n", da[line_count], db[line_count], dc[line_count]);*/
//da[line_count] = 3.0; db[line_count] = 3.0; dc[line_count] = 3.0;
line_count ++;
}
float * dda, * ddb, * ddc;
float * dx, * dy;
int _floatSize = NUM * sizeof(float);
cudaMalloc( (void**)&dda, _floatSize);
cudaMalloc( (void**)&ddb, _floatSize);
cudaMalloc( (void**)&ddc, _floatSize);
cudaMalloc( (void**)&dx, _floatSize / 4);
cudaMalloc( (void**)&dy, _floatSize / 4);
cudaMemcpyToSymbol(dNUM, &NUM, sizeof(int));
cudaMemcpyToSymbol(dBLOCK_N, &BLOCK_N, sizeof(int));
cudaMemcpyToSymbol(dTHREAD_N, &THREAD_N, sizeof(int));
cudaMemcpy( dda, da, _floatSize, cudaMemcpyHostToDevice );
cudaMemcpy( ddb, db, _floatSize, cudaMemcpyHostToDevice );
cudaMemcpy( ddc, dc, _floatSize, cudaMemcpyHostToDevice );
cudaMemcpyToSymbol( dx1, &x1, sizeof(float));
cudaMemcpyToSymbol( dy1, &y1, sizeof(float));
cudaMemcpyToSymbol( dx2, &x2, sizeof(float));
cudaMemcpyToSymbol( dy2, &y2, sizeof(float));
cudaMemcpyToSymbol( dx3, &x3, sizeof(float));
cudaMemcpyToSymbol( dy3, &y3, sizeof(float));
calculate_triliteration<<<BLOCK_N, THREAD_N, 2 * THREAD_N * sizeof(float)>>>(dda, ddb, ddc, dx, dy);
cudaMemcpy( x, dx, _floatSize / 4, cudaMemcpyDeviceToHost);
cudaMemcpy( y, dy, _floatSize / 4, cudaMemcpyDeviceToHost);
printf("Legend : \n\t. => difference less than 0.1,\n\tX => difference greater than 0.1 \n");
check_fd = fopen(CHECK_FILE, "r");
float avgX = 0.0, avgY= 0.0;
//printf("Original X and Y\t Calculated X and Y\n");
for(int i = 0; i < NUM / 4; i ++){
fgets(line, MAXLINESIZE, check_fd);
sscanf(line, "%f %f", &avgX, &avgY);
if(abs(x[i] - avgX) <= 0.1 && abs(y[i] - avgY) <= 0.1){
printf(".");
}else{
printf("X");
}
//printf("%f, %f\t%f, %f\n", x[i], y[i], avgX, avgY);
}
cudaFree(dda);
cudaFree(ddb);
cudaFree(ddc);
cudaFree(dx);
cudaFree(dy);
fclose(input_fd);
fclose(check_fd);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nExecution time is %f\n", time_spent);
return 1;
} |
064e5fabd5a6e8b97beca237f603173c00c33ecf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "dali/kernels/imgproc/color_manipulation/equalize/lookup.h"
namespace dali {
namespace kernels {
namespace equalize {
namespace lookup {
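// One blockIdx.y per sample: the x dimension grid-strides over the sample's
// flattened elements and maps each input value through its channel's lookup table.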
__global__ void Lookup(const SampleDesc *sample_descs) {
auto sample_desc = sample_descs[blockIdx.y];
for (uint64_t idx = static_cast<uint64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
idx < sample_desc.num_elements;
idx += static_cast<uint64_t>(blockDim.x) * gridDim.x) {
const uint8_t *in = sample_desc.in;
uint8_t *out = sample_desc.out;
uint64_t channel_idx = idx % sample_desc.num_channels;
out[idx] = __ldg(sample_desc.lut + channel_idx * SampleDesc::range_size + in[idx]);
}
}
void LookupKernelGpu::Run(KernelContext &ctx, const TensorListView<StorageGPU, uint8_t, 2> &out,
const TensorListView<StorageGPU, const uint8_t, 2> &in,
const TensorListView<StorageGPU, const uint8_t, 2> &lut) {
int batch_size = out.num_samples();
assert(in.num_samples() == batch_size && lut.num_samples() == batch_size);
sample_descs_.clear();
sample_descs_.reserve(batch_size);
int64_t max_num_blocks = 0;
for (int sample_idx = 0; sample_idx < batch_size; sample_idx++) {
int64_t num_channels = in.shape[sample_idx][1];
int64_t num_elements = in.shape[sample_idx].num_elements();
assert(num_channels == lut.shape[sample_idx][0] && num_channels == out.shape[sample_idx][1]);
int64_t num_blocks = div_ceil(num_elements, kBlockSize);
max_num_blocks = ::max(max_num_blocks, num_blocks);
sample_descs_.push_back({out.data[sample_idx], in.data[sample_idx], lut.data[sample_idx],
static_cast<uint64_t>(num_elements),
static_cast<uint64_t>(num_channels)});
}
max_num_blocks = ::min(max_num_blocks, kMaxGridSize);
SampleDesc *samples_desc_dev;
std::tie(samples_desc_dev) = ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, sample_descs_);
dim3 grid{static_cast<unsigned int>(max_num_blocks), static_cast<unsigned int>(batch_size)};
hipLaunchKernelGGL(( Lookup), dim3(grid), dim3(kBlockSize), 0, ctx.gpu.stream, samples_desc_dev);
CUDA_CALL(hipGetLastError());
}
} // namespace lookup
} // namespace equalize
} // namespace kernels
} // namespace dali
| 064e5fabd5a6e8b97beca237f603173c00c33ecf.cu | // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "dali/kernels/imgproc/color_manipulation/equalize/lookup.h"
namespace dali {
namespace kernels {
namespace equalize {
namespace lookup {
__global__ void Lookup(const SampleDesc *sample_descs) {
auto sample_desc = sample_descs[blockIdx.y];
for (uint64_t idx = static_cast<uint64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
idx < sample_desc.num_elements;
idx += static_cast<uint64_t>(blockDim.x) * gridDim.x) {
const uint8_t *in = sample_desc.in;
uint8_t *out = sample_desc.out;
uint64_t channel_idx = idx % sample_desc.num_channels;
out[idx] = __ldg(sample_desc.lut + channel_idx * SampleDesc::range_size + in[idx]);
}
}
void LookupKernelGpu::Run(KernelContext &ctx, const TensorListView<StorageGPU, uint8_t, 2> &out,
const TensorListView<StorageGPU, const uint8_t, 2> &in,
const TensorListView<StorageGPU, const uint8_t, 2> &lut) {
int batch_size = out.num_samples();
assert(in.num_samples() == batch_size && lut.num_samples() == batch_size);
sample_descs_.clear();
sample_descs_.reserve(batch_size);
int64_t max_num_blocks = 0;
for (int sample_idx = 0; sample_idx < batch_size; sample_idx++) {
int64_t num_channels = in.shape[sample_idx][1];
int64_t num_elements = in.shape[sample_idx].num_elements();
assert(num_channels == lut.shape[sample_idx][0] && num_channels == out.shape[sample_idx][1]);
int64_t num_blocks = div_ceil(num_elements, kBlockSize);
max_num_blocks = std::max(max_num_blocks, num_blocks);
sample_descs_.push_back({out.data[sample_idx], in.data[sample_idx], lut.data[sample_idx],
static_cast<uint64_t>(num_elements),
static_cast<uint64_t>(num_channels)});
}
max_num_blocks = std::min(max_num_blocks, kMaxGridSize);
SampleDesc *samples_desc_dev;
std::tie(samples_desc_dev) = ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, sample_descs_);
dim3 grid{static_cast<unsigned int>(max_num_blocks), static_cast<unsigned int>(batch_size)};
Lookup<<<grid, kBlockSize, 0, ctx.gpu.stream>>>(samples_desc_dev);
CUDA_CALL(cudaGetLastError());
}
} // namespace lookup
} // namespace equalize
} // namespace kernels
} // namespace dali
|
188ef1b107fc7384b6d03e9e1b7803a68018a44c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bitonic_sort.cuh"
template <class X>
__device__ __forceinline__
void swap(X &a,X&b){
const X tmp = a; a = b; b = tmp;
}
template <class X>
__device__ __forceinline__
void swap(X &key,X &val,int mask,int dir){
const X y = __shfl_xor(key,mask);
if(key < y == dir){
key = y;
val = __shfl_xor(val,mask);
}
}
//extract k th bit of i
__device__ inline
int bfe(int i, int k){
return (i>>k) & 0x01;
}
template <int N, class X>
__global__
void bitonic_sort_key(X *gkeys,X *gvals){
X *keys = gkeys + blockDim.x*blockIdx.x;
X *vals = gvals + blockDim.x*blockIdx.x;
int i = threadIdx.x;
#if 0
for(int k=2;k <= N;k<<=1){
for(int j=k>>1;j>0;j>>=1){
int ixj = i^j;
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (keys[i]>keys[ixj]) {
swap(keys[i],keys[ixj]);
swap(vals[i],vals[ixj]);
}
}
if ((i&k)!=0) {
/* Sort descending */
if (keys[i]<keys[ixj]) {
swap(keys[i],keys[ixj]);
swap(vals[i],vals[ixj]);
}
}
}
__syncthreads();
}
}
#else
const int laneid = threadIdx.x % WarpSize;
//const int warpid = threadIdx.x / WarpSize;
X key = keys[i];
X val = vals[i];
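  // Warp-level phase: partners are exchanged with __shfl_xor so each thread's
  // key/value pair is sorted into bitonic runs of up to 32 elements entirely in
  // registers.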
if(N>=2){
swap(key, val, 0x01, bfe(laneid, 1) ^ bfe(laneid, 0)); // 2
}
if(N>=4){
swap(key, val, 0x02, bfe(laneid, 2) ^ bfe(laneid, 1)); // 4
swap(key, val, 0x01, bfe(laneid, 2) ^ bfe(laneid, 0));
}
if(N>=8){
swap(key, val, 0x04, bfe(laneid, 3) ^ bfe(laneid, 2)); // 8
swap(key, val, 0x02, bfe(laneid, 3) ^ bfe(laneid, 1));
swap(key, val, 0x01, bfe(laneid, 3) ^ bfe(laneid, 0));
}
if(N>=16){
swap(key, val, 0x08, bfe(laneid, 4) ^ bfe(laneid, 3)); // 16
swap(key, val, 0x04, bfe(laneid, 4) ^ bfe(laneid, 2));
swap(key, val, 0x02, bfe(laneid, 4) ^ bfe(laneid, 1));
swap(key, val, 0x01, bfe(laneid, 4) ^ bfe(laneid, 0));
}
if(N>=32){
swap(key, val, 0x10, bfe(laneid, 4)); // 32
swap(key, val, 0x08, bfe(laneid, 3));
swap(key, val, 0x04, bfe(laneid, 2));
swap(key, val, 0x02, bfe(laneid, 1));
swap(key, val, 0x01, bfe(laneid, 0));
}
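  // Shared-memory phase: the register-sorted runs are merged up to the full tile
  // size N with the classic bitonic network (strides k = 32 .. N).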
__shared__ X skey[N];
__shared__ X sval[N];
skey[i] = key;
sval[i] = val;
__syncthreads();
for(int k=32;k <= N;k<<=1){
for(int j=k>>1;j>0;j>>=1){
int ixj = i^j;
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (skey[i]>skey[ixj]) {
swap(skey[i],skey[ixj]);
swap(sval[i],sval[ixj]);
}
}
if ((i&k)!=0) {
/* Sort descending */
if (skey[i]<skey[ixj]) {
swap(skey[i],skey[ixj]);
swap(sval[i],sval[ixj]);
}
}
}
__syncthreads();
}
}
keys[i] = skey[i];
vals[i] = sval[i];
#endif
}
template __global__ void bitonic_sort_key< 2,int>(int*,int*);
template __global__ void bitonic_sort_key< 4,int>(int*,int*);
template __global__ void bitonic_sort_key< 8,int>(int*,int*);
template __global__ void bitonic_sort_key< 16,int>(int*,int*);
template __global__ void bitonic_sort_key< 32,int>(int*,int*);
template __global__ void bitonic_sort_key< 64,int>(int*,int*);
template __global__ void bitonic_sort_key< 128,int>(int*,int*);
template __global__ void bitonic_sort_key< 256,int>(int*,int*);
template __global__ void bitonic_sort_key< 512,int>(int*,int*);
template __global__ void bitonic_sort_key<1024,int>(int*,int*);
//template __global__ void bitonic_sort_key<2048,int>(int*,int*); // there is a bug for 2048 element
| 188ef1b107fc7384b6d03e9e1b7803a68018a44c.cu |
#include "bitonic_sort.cuh"
template <class X>
__device__ __forceinline__
void swap(X &a,X&b){
const X tmp = a; a = b; b = tmp;
}
template <class X>
__device__ __forceinline__
void swap(X &key,X &val,int mask,int dir){
const X y = __shfl_xor(key,mask);
if(key < y == dir){
key = y;
val = __shfl_xor(val,mask);
}
}
//extract k th bit of i
__device__ inline
int bfe(int i, int k){
return (i>>k) & 0x01;
}
template <int N, class X>
__global__
void bitonic_sort_key(X *gkeys,X *gvals){
X *keys = gkeys + blockDim.x*blockIdx.x;
X *vals = gvals + blockDim.x*blockIdx.x;
int i = threadIdx.x;
#if 0
for(int k=2;k <= N;k<<=1){
for(int j=k>>1;j>0;j>>=1){
int ixj = i^j;
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (keys[i]>keys[ixj]) {
swap(keys[i],keys[ixj]);
swap(vals[i],vals[ixj]);
}
}
if ((i&k)!=0) {
/* Sort descending */
if (keys[i]<keys[ixj]) {
swap(keys[i],keys[ixj]);
swap(vals[i],vals[ixj]);
}
}
}
__syncthreads();
}
}
#else
const int laneid = threadIdx.x % WarpSize;
//const int warpid = threadIdx.x / WarpSize;
X key = keys[i];
X val = vals[i];
if(N>=2){
swap(key, val, 0x01, bfe(laneid, 1) ^ bfe(laneid, 0)); // 2
}
if(N>=4){
swap(key, val, 0x02, bfe(laneid, 2) ^ bfe(laneid, 1)); // 4
swap(key, val, 0x01, bfe(laneid, 2) ^ bfe(laneid, 0));
}
if(N>=8){
swap(key, val, 0x04, bfe(laneid, 3) ^ bfe(laneid, 2)); // 8
swap(key, val, 0x02, bfe(laneid, 3) ^ bfe(laneid, 1));
swap(key, val, 0x01, bfe(laneid, 3) ^ bfe(laneid, 0));
}
if(N>=16){
swap(key, val, 0x08, bfe(laneid, 4) ^ bfe(laneid, 3)); // 16
swap(key, val, 0x04, bfe(laneid, 4) ^ bfe(laneid, 2));
swap(key, val, 0x02, bfe(laneid, 4) ^ bfe(laneid, 1));
swap(key, val, 0x01, bfe(laneid, 4) ^ bfe(laneid, 0));
}
if(N>=32){
swap(key, val, 0x10, bfe(laneid, 4)); // 32
swap(key, val, 0x08, bfe(laneid, 3));
swap(key, val, 0x04, bfe(laneid, 2));
swap(key, val, 0x02, bfe(laneid, 1));
swap(key, val, 0x01, bfe(laneid, 0));
}
__shared__ X skey[N];
__shared__ X sval[N];
skey[i] = key;
sval[i] = val;
__syncthreads();
for(int k=32;k <= N;k<<=1){
for(int j=k>>1;j>0;j>>=1){
int ixj = i^j;
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (skey[i]>skey[ixj]) {
swap(skey[i],skey[ixj]);
swap(sval[i],sval[ixj]);
}
}
if ((i&k)!=0) {
/* Sort descending */
if (skey[i]<skey[ixj]) {
swap(skey[i],skey[ixj]);
swap(sval[i],sval[ixj]);
}
}
}
__syncthreads();
}
}
keys[i] = skey[i];
vals[i] = sval[i];
#endif
}
template __global__ void bitonic_sort_key< 2,int>(int*,int*);
template __global__ void bitonic_sort_key< 4,int>(int*,int*);
template __global__ void bitonic_sort_key< 8,int>(int*,int*);
template __global__ void bitonic_sort_key< 16,int>(int*,int*);
template __global__ void bitonic_sort_key< 32,int>(int*,int*);
template __global__ void bitonic_sort_key< 64,int>(int*,int*);
template __global__ void bitonic_sort_key< 128,int>(int*,int*);
template __global__ void bitonic_sort_key< 256,int>(int*,int*);
template __global__ void bitonic_sort_key< 512,int>(int*,int*);
template __global__ void bitonic_sort_key<1024,int>(int*,int*);
//template __global__ void bitonic_sort_key<2048,int>(int*,int*); // there is a bug for 2048 element
|
36737563b8503f51c4c9ba3c15471e35ec61728d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 10000000
#define TAILLE_BLOC_X 32
#define TAILLE_BLOC_Y 1
#define T 1
#define F 0
/*====================*/
/* KERNEL DECLARATION */
/*====================*/
__global__ void primeKernel (int *tabGPU, int n)
{
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
unsigned int j = 2;
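// Trial division: the first divisor found in [2, sqrt(i)] marks i as composite.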
while ((j*j)<=i)
{
if (i%j==0){
/*printf("%d -- not prime\n",i);*/
tabGPU[i-1]=F;
return;
}
j++;
}
tabGPU[i-1]=T;
/*printf("%d -- prime\n",i);*/
}
/*=================*/
/* CPU DECLARATION */
/*=================*/
void primeCPU (int *tabCPU, int n)
{
int i,j;
int est_premier;
for (i=2; i<n; i++){
est_premier=T;
j=2;
while ((j*j) <= i){
if ((i%j) == 0)
{
est_premier=F;
tabCPU[i-2]=F;
break;
}
j++;
}
if (est_premier) tabCPU[i-2]=T;
}
}
/*==================*/
/* MAIN DECLARATION */
/*==================*/
double my_gettimeofday()
{
struct timeval tmp_time;
gettimeofday(&tmp_time, NULL);
return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
int main()
{
int *tabCPU, *tabGPU;
double deb, fin;
/* CPU allocation */
tabCPU = (int*)malloc(N*sizeof(int));
/* GPU allocation */
hipMalloc((void**)&tabGPU, N*sizeof(int));
/* Copy input CPU -> GPU */
hipMemcpy(tabGPU, tabCPU, N*sizeof(int), hipMemcpyHostToDevice);
/* Kernel launch */
dim3 threadsParBloc(TAILLE_BLOC_X);
dim3 nbBlocs(ceil(N/TAILLE_BLOC_X));
deb = my_gettimeofday();
hipLaunchKernelGGL(( primeKernel), dim3(nbBlocs), dim3(threadsParBloc), 0, 0, tabGPU, N);
/* Wait for the GPU computation to finish */
/*hipDeviceSynchronize();*/
/* Copy results GPU -> CPU */
hipMemcpy(tabCPU, tabGPU, N*sizeof(int), hipMemcpyDeviceToHost);
fin = my_gettimeofday();
printf("TIME GPU [%d] -- %g(s)\n",N,(fin-deb));
/* CPU reference computation (timed for comparison with the GPU) */
deb = my_gettimeofday();
primeCPU(tabCPU, N);
fin = my_gettimeofday();
printf("TIME CPU [%d] -- %g(s)\n",N,(fin-deb));
return 0;
}
| 36737563b8503f51c4c9ba3c15471e35ec61728d.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 10000000
#define TAILLE_BLOC_X 32
#define TAILLE_BLOC_Y 1
#define T 1
#define F 0
/*====================*/
/* KERNEL DECLARATION */
/*====================*/
__global__ void primeKernel (int *tabGPU, int n)
{
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
unsigned int j = 2;
while ((j*j)<=i)
{
if (i%j==0){
/*printf("%d -- not prime\n",i);*/
tabGPU[i-1]=F;
return;
}
j++;
}
tabGPU[i-1]=T;
/*printf("%d -- prime\n",i);*/
}
/*=================*/
/* CPU DECLARATION */
/*=================*/
void primeCPU (int *tabCPU, int n)
{
int i,j;
int est_premier;
for (i=2; i<n; i++){
est_premier=T;
j=2;
while ((j*j) <= i){
if ((i%j) == 0)
{
est_premier=F;
tabCPU[i-2]=F;
break;
}
j++;
}
if (est_premier) tabCPU[i-2]=T;
}
}
/*==================*/
/* MAIN DECLARATION */
/*==================*/
double my_gettimeofday()
{
struct timeval tmp_time;
gettimeofday(&tmp_time, NULL);
return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
int main()
{
int *tabCPU, *tabGPU;
double deb, fin;
/* CPU allocation */
tabCPU = (int*)malloc(N*sizeof(int));
/* GPU allocation */
cudaMalloc((void**)&tabGPU, N*sizeof(int));
/* Copy input CPU -> GPU */
cudaMemcpy(tabGPU, tabCPU, N*sizeof(int), cudaMemcpyHostToDevice);
/* Kernel launch */
dim3 threadsParBloc(TAILLE_BLOC_X);
dim3 nbBlocs(ceil(N/TAILLE_BLOC_X));
deb = my_gettimeofday();
primeKernel<<<nbBlocs, threadsParBloc>>>(tabGPU, N);
/* Wait for the GPU computation to finish */
/*cudaDeviceSynchronize();*/
/* Copy results GPU -> CPU */
cudaMemcpy(tabCPU, tabGPU, N*sizeof(int), cudaMemcpyDeviceToHost);
fin = my_gettimeofday();
printf("TIME GPU [%d] -- %g(s)\n",N,(fin-deb));
/* CPU reference computation (timed for comparison with the GPU) */
deb = my_gettimeofday();
primeCPU(tabCPU, N);
fin = my_gettimeofday();
printf("TIME CPU [%d] -- %g(s)\n",N,(fin-deb));
return 0;
}
|
a05045c4cb0a155bb5bec666f73e605fa27807dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This file was modified from sputnik to implement batch support for
// sddmm directly in the kernels
// Copyright 2020 The Sputnik Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cmath>
#include "sputnik/barrier.h"
#include "sputnik/common.h"
#include "sputnik/cuda_utils.h"
#include "sputnik/load_store.h"
#include "sputnik/sddmm/all_reduce.h"
#include "sputnik/sddmm/compute_utils.h"
#include "sputnik/sddmm/cuda_sddmm.h"
#include "sputnik/sddmm/dense_to_reg.h"
#include "sputnik/sddmm/dense_to_shared.h"
#include "sputnik/sddmm/output_tile.h"
#include "sputnik/tiling_utils.h"
#include <ATen/ATen.h>
#include <torch/types.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
namespace sputnik {
namespace {
template <
typename LoadType,
int kBlockItemsY,
int kBlockItemsK,
int kBlockItemsX,
int kBlockWidth,
int kPredicateK = true>
__global__ void __launch_bounds__(kBlockItemsY* kBlockWidth) CudaSddmmKernel2(
int m,
int k,
int n,
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const int* __restrict__ column_indices,
const float* __restrict__ lhs_matrix,
const float* __restrict__ rhs_matrix,
float* __restrict__ output_values,
int nnz) {
static_assert(
(kBlockItemsY * kBlockWidth) % 32 == 0,
"The thread-block size must be divisible by the warp size.");
static_assert(
(kBlockItemsY * kBlockWidth) > 0,
"The thread-block size must be nonzero.");
static_assert(
kBlockItemsK >= kBlockWidth, "k-dimension tile must be >= block width.");
static_assert(
kBlockItemsK % kBlockWidth == 0,
"k-dimension tile size must be divisible by block width.");
static_assert(
kBlockItemsX >= kBlockWidth,
"n-dimension tile size must be >= block width.");
static_assert(
kBlockItemsX % kBlockWidth == 0,
"n-dimension tile size must be divisible by block width.");
typedef TilingUtils<kBlockItemsY, kBlockItemsK, kBlockItemsX> Tiling;
// Calculate this thread block's indices into the M and N dimensions.
int m_index = Tiling::IndexM(), n_index = Tiling::IndexN();
// Threads that work on different m-dim indices are independent. If we're
// out of bounds in the m-dimension we can just return.
if (m_index >= m)
return;
m_index = Load(row_indices + m_index);
// Load the row offset and calculate the number of non-zeros in the row.
int row_offset = __ldg(row_offsets + m_index);
int nonzeros = __ldg(row_offsets + m_index + 1) - row_offset;
// If this thread block has no nonzeros in the row to process, exit early.
if (n_index >= nonzeros)
return;
// Calculate the number of nonzeros that this thread block processes and
// substract the x-dim thread index to simplify loop bounds checks.
nonzeros = Min(nonzeros - n_index, kBlockItemsX) - threadIdx.x;
// Shared memory tile for the lhs dense matrix values.
float lhs_fragment[kBlockItemsK / kBlockWidth];
// Shared memory tile for the output column indices.
__shared__ int column_indices_tile_array[kBlockItemsX * kBlockItemsY];
int* column_indices_tile =
TilingUtils<kBlockItemsY, kBlockItemsK, kBlockItemsX>::MaybeOffset(
column_indices_tile_array, kBlockItemsK * threadIdx.y);
// Create a dense-to-shared loader for the lhs matrix.
DenseToShared<LoadType, kBlockItemsK, kBlockWidth> lhs_tile_loader(
k, m_index, lhs_matrix + blockIdx.z * m * k, lhs_fragment);
// Register file fragment for the rhs dense matrix values.
float rhs_fragment[kBlockItemsK * kBlockItemsX / kBlockWidth];
// Create a dense-to-register loader for the rhs matrix.
DenseToReg<LoadType, kBlockItemsK, kBlockItemsX, kBlockWidth> rhs_tile_loader(
k,
row_offset,
n_index,
column_indices,
rhs_matrix + blockIdx.z * n * k,
column_indices_tile,
rhs_fragment);
// Accumulator registers for the partial results. Initialize the
// registers to zero s.t. we can always accumulate in-place.
float accumulator_fragment[kBlockItemsX] = {};
// Helper for managing syncronization between collaborating threads.
Barrier<kBlockItemsY, kBlockWidth> barrier(threadIdx.y);
// Helper for computing tile-level partial matmuls.
ComputeUtils<kBlockItemsK, kBlockItemsX, kBlockWidth> computer(
lhs_fragment, rhs_fragment, accumulator_fragment);
// Registers for the final reduced outputs.
float output_fragment[kBlockItemsX / kBlockWidth];
// Helper to reduce the partial accumulators prior to writing.
AllReduce<LoadType, kBlockItemsX, kBlockWidth> all_reduce(
barrier.ThreadMask(), accumulator_fragment, output_fragment);
// Helper for storing the results to the output.
OutputTile<kBlockItemsX, kBlockWidth> output_tile_storer(
row_offset, n_index, output_fragment, output_values + blockIdx.z * nnz);
//
/// Begin kernel main loop.
//
// Load the column indices for this n-dimension tile.
rhs_tile_loader.LoadColumnIndices(nonzeros);
barrier.Sync();
#pragma nounroll
for (; k >= kBlockItemsK; k -= kBlockItemsK) {
// Load a tile from the dense lhs matrix into smem and sync.
lhs_tile_loader.Load();
// Load a tile from the dense rhs matrix into registers.
rhs_tile_loader.Load();
// Multiply the tiles and accumulate the results.
computer.TileMAC();
}
//
/// Begin k-dimension residue computation.
//
if (kPredicateK) {
// Update the residue size to simplify loop bounds checking. Note
// that `k` is guaranteed to be a multiple of `kValuesPerLoad`.
constexpr int kValuesPerLoad = sizeof(LoadType) / sizeof(float);
k -= threadIdx.x * kValuesPerLoad;
// Load a partial tile from the lhs matrix and sync.
lhs_tile_loader.Residue(k);
// Load a tile from the rhs matrix and compute immediately.
rhs_tile_loader.ResidueAndCompute(k, lhs_fragment, accumulator_fragment);
}
//
/// Cleanup the partial sums across the (sub)warp.
//
all_reduce.Reduce();
//
/// Write the results to the output.
//
output_tile_storer.Store(nonzeros);
}
} // namespace
} // namespace sputnik
template <
typename LoadType,
int kBlockItemsY,
int kBlockItemsK,
int kBlockItemsX,
int kBlockWidth,
int kPredicateK>
hipError_t CudaSddmmEx2(
int m,
int k,
int n,
int nonzeros,
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const int* __restrict__ column_indices,
const float* __restrict__ lhs_matrix,
const float* __restrict__ rhs_matrix,
float* __restrict__ output_values,
hipStream_t stream,
int batch_size) {
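  // The grid's z dimension indexes the batch; inside the kernel each batch element
  // offsets its lhs, rhs and output pointers by blockIdx.z.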
dim3 grid_dim(
::ceil(static_cast<float>(m) / kBlockItemsY),
::ceil(static_cast<float>(n) / kBlockItemsX),
batch_size);
dim3 block_dim(kBlockWidth, kBlockItemsY, 1);
hipLaunchKernelGGL(( sputnik::CudaSddmmKernel2<
LoadType,
kBlockItemsY,
kBlockItemsK,
kBlockItemsX,
kBlockWidth,
kPredicateK>), dim3(grid_dim), dim3(block_dim), 0, stream,
m,
k,
n,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
nonzeros);
return hipGetLastError();
}
hipError_t CudaSddmm2(
int m,
int k,
int n,
int nonzeros,
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const int* __restrict__ column_indices,
const float* __restrict__ lhs_matrix,
const float* __restrict__ rhs_matrix,
float* __restrict__ output_values,
hipStream_t stream,
int batch_size) {
// If possible, launch a variant that does not include the k-dimension
// residue handling code.
if ((k % 4) == 0) {
if ((k % 32) == 0) {
return CudaSddmmEx2<float4, 4, 32, 32, 8, false>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
} else {
return CudaSddmmEx2<float4, 4, 32, 32, 8, true>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
}
} else if ((k % 2) == 0) {
return CudaSddmmEx2<float2, 2, 32, 32, 16, true>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
} else {
// Scalar kernel.
return CudaSddmmEx2<float, 1, 32, 32, 32, true>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
}
}
at::Tensor sddmm_sputnik(
const at::Tensor& a,
const at::Tensor& b,
const at::Tensor& row_indices,
const at::Tensor& row_offsets,
const at::Tensor& column_indices) {
TORCH_CHECK(a.dim() == b.dim());
TORCH_CHECK(a.dim() == 3);
TORCH_CHECK(a.size(0) == b.size(0));
TORCH_CHECK(a.size(2) == b.size(2));
TORCH_CHECK(row_indices.dim() == 1);
TORCH_CHECK(row_offsets.dim() == 1);
TORCH_CHECK(column_indices.dim() == 1);
TORCH_CHECK(a.is_cuda(), "a must be a CUDA tensor");
TORCH_CHECK(b.is_cuda(), "b must be a CUDA tensor");
TORCH_CHECK(row_indices.is_cuda(), "row_indices must be a CUDA tensor");
TORCH_CHECK(row_offsets.is_cuda(), "row_offsets must be a CUDA tensor");
TORCH_CHECK(column_indices.is_cuda(), "column_offsets must be a CUDA tensor");
TORCH_CHECK(a.is_contiguous(), "a must be a contiguous tensor");
TORCH_CHECK(b.is_contiguous(), "b must be a contiguous tensor");
TORCH_CHECK(
row_indices.is_contiguous(), "row_indices must be a contiguous tensor");
TORCH_CHECK(
row_offsets.is_contiguous(), "row_offsets must be a contiguous tensor");
TORCH_CHECK(
column_indices.is_contiguous(),
"column_offsets must be a contiguous tensor");
TORCH_CHECK(!a.is_sparse(), "a must be a dense tensor");
TORCH_CHECK(!b.is_sparse(), "b must be a dense tensor");
TORCH_CHECK(!row_indices.is_sparse(), "row_indices must be a dense tensor");
TORCH_CHECK(!row_offsets.is_sparse(), "row_offsets must be a dense tensor");
TORCH_CHECK(
!column_indices.is_sparse(), "column_offsets must be a dense tensor");
TORCH_CHECK(a.device() == b.device(), "a should be in the same device as b");
TORCH_CHECK(
a.device() == row_indices.device(),
"a should be in the same device as row_indices");
TORCH_CHECK(
a.device() == row_offsets.device(),
"a should be in the same device as row_offsets");
TORCH_CHECK(
a.device() == column_indices.device(),
"a should be in the same device as column_indices");
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int batch = a.size(0);
int m = a.size(1);
int k = a.size(2);
int n = b.size(1);
int nonzeros = column_indices.size(0);
at::Tensor output = at::empty({batch, nonzeros}, a.options());
AT_CUDA_CHECK(CudaSddmm2(
m,
k,
n,
nonzeros,
row_indices.data_ptr<int>(),
row_offsets.data_ptr<int>(),
column_indices.data_ptr<int>(),
a.data_ptr<float>(),
b.data_ptr<float>(),
output.data_ptr<float>(),
stream,
batch));
return output;
}
TORCH_LIBRARY_IMPL(xformers, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("xformers::sddmm_sputnik"), TORCH_FN(sddmm_sputnik));
}
| a05045c4cb0a155bb5bec666f73e605fa27807dc.cu | // This file was modified from sputnik to implement batch support for
// sddmm directly in the kernels
// Copyright 2020 The Sputnik Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cmath>
#include "sputnik/barrier.h"
#include "sputnik/common.h"
#include "sputnik/cuda_utils.h"
#include "sputnik/load_store.h"
#include "sputnik/sddmm/all_reduce.h"
#include "sputnik/sddmm/compute_utils.h"
#include "sputnik/sddmm/cuda_sddmm.h"
#include "sputnik/sddmm/dense_to_reg.h"
#include "sputnik/sddmm/dense_to_shared.h"
#include "sputnik/sddmm/output_tile.h"
#include "sputnik/tiling_utils.h"
#include <ATen/ATen.h>
#include <torch/types.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
namespace sputnik {
namespace {
template <
typename LoadType,
int kBlockItemsY,
int kBlockItemsK,
int kBlockItemsX,
int kBlockWidth,
int kPredicateK = true>
__global__ void __launch_bounds__(kBlockItemsY* kBlockWidth) CudaSddmmKernel2(
int m,
int k,
int n,
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const int* __restrict__ column_indices,
const float* __restrict__ lhs_matrix,
const float* __restrict__ rhs_matrix,
float* __restrict__ output_values,
int nnz) {
static_assert(
(kBlockItemsY * kBlockWidth) % 32 == 0,
"The thread-block size must be divisible by the warp size.");
static_assert(
(kBlockItemsY * kBlockWidth) > 0,
"The thread-block size must be nonzero.");
static_assert(
kBlockItemsK >= kBlockWidth, "k-dimension tile must be >= block width.");
static_assert(
kBlockItemsK % kBlockWidth == 0,
"k-dimension tile size must be divisible by block width.");
static_assert(
kBlockItemsX >= kBlockWidth,
"n-dimension tile size must be >= block width.");
static_assert(
kBlockItemsX % kBlockWidth == 0,
"n-dimension tile size must be divisible by block width.");
typedef TilingUtils<kBlockItemsY, kBlockItemsK, kBlockItemsX> Tiling;
// Calculate this thread block's indices into the M and N dimensions.
int m_index = Tiling::IndexM(), n_index = Tiling::IndexN();
// Threads that work on different m-dim indices are independent. If we're
// out of bounds in the m-dimension we can just return.
if (m_index >= m)
return;
m_index = Load(row_indices + m_index);
// Load the row offset and calculate the number of non-zeros in the row.
int row_offset = __ldg(row_offsets + m_index);
int nonzeros = __ldg(row_offsets + m_index + 1) - row_offset;
// If this thread block has no nonzeros in the row to process, exit early.
if (n_index >= nonzeros)
return;
// Calculate the number of nonzeros that this thread block processes and
// substract the x-dim thread index to simplify loop bounds checks.
nonzeros = Min(nonzeros - n_index, kBlockItemsX) - threadIdx.x;
// Shared memory tile for the lhs dense matrix values.
float lhs_fragment[kBlockItemsK / kBlockWidth];
// Shared memory tile for the output column indices.
__shared__ int column_indices_tile_array[kBlockItemsX * kBlockItemsY];
int* column_indices_tile =
TilingUtils<kBlockItemsY, kBlockItemsK, kBlockItemsX>::MaybeOffset(
column_indices_tile_array, kBlockItemsK * threadIdx.y);
// Create a dense-to-shared loader for the lhs matrix.
DenseToShared<LoadType, kBlockItemsK, kBlockWidth> lhs_tile_loader(
k, m_index, lhs_matrix + blockIdx.z * m * k, lhs_fragment);
// Register file fragment for the rhs dense matrix values.
float rhs_fragment[kBlockItemsK * kBlockItemsX / kBlockWidth];
// Create a dense-to-register loader for the rhs matrix.
DenseToReg<LoadType, kBlockItemsK, kBlockItemsX, kBlockWidth> rhs_tile_loader(
k,
row_offset,
n_index,
column_indices,
rhs_matrix + blockIdx.z * n * k,
column_indices_tile,
rhs_fragment);
// Accumulator registers for the partial results. Initialize the
// registers to zero s.t. we can always accumulate in-place.
float accumulator_fragment[kBlockItemsX] = {};
// Helper for managing syncronization between collaborating threads.
Barrier<kBlockItemsY, kBlockWidth> barrier(threadIdx.y);
// Helper for computing tile-level partial matmuls.
ComputeUtils<kBlockItemsK, kBlockItemsX, kBlockWidth> computer(
lhs_fragment, rhs_fragment, accumulator_fragment);
// Registers for the final reduced outputs.
float output_fragment[kBlockItemsX / kBlockWidth];
// Helper to reduce the partial accumulators prior to writing.
AllReduce<LoadType, kBlockItemsX, kBlockWidth> all_reduce(
barrier.ThreadMask(), accumulator_fragment, output_fragment);
// Helper for storing the results to the output.
OutputTile<kBlockItemsX, kBlockWidth> output_tile_storer(
row_offset, n_index, output_fragment, output_values + blockIdx.z * nnz);
//
/// Begin kernel main loop.
//
// Load the column indices for this n-dimension tile.
rhs_tile_loader.LoadColumnIndices(nonzeros);
barrier.Sync();
#pragma nounroll
for (; k >= kBlockItemsK; k -= kBlockItemsK) {
// Load a tile from the dense lhs matrix into smem and sync.
lhs_tile_loader.Load();
// Load a tile from the dense rhs matrix into registers.
rhs_tile_loader.Load();
// Multiply the tiles and accumulate the results.
computer.TileMAC();
}
//
/// Begin k-dimension residue computation.
//
if (kPredicateK) {
// Update the residue size to simplify loop bounds checking. Note
// that `k` is guaranteed to be a multiple of `kValuesPerLoad`.
constexpr int kValuesPerLoad = sizeof(LoadType) / sizeof(float);
k -= threadIdx.x * kValuesPerLoad;
// Load a partial tile from the lhs matrix and sync.
lhs_tile_loader.Residue(k);
// Load a tile from the rhs matrix and compute immediately.
rhs_tile_loader.ResidueAndCompute(k, lhs_fragment, accumulator_fragment);
}
//
/// Cleanup the partial sums across the (sub)warp.
//
all_reduce.Reduce();
//
/// Write the results to the output.
//
output_tile_storer.Store(nonzeros);
}
} // namespace
} // namespace sputnik
template <
typename LoadType,
int kBlockItemsY,
int kBlockItemsK,
int kBlockItemsX,
int kBlockWidth,
int kPredicateK>
cudaError_t CudaSddmmEx2(
int m,
int k,
int n,
int nonzeros,
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const int* __restrict__ column_indices,
const float* __restrict__ lhs_matrix,
const float* __restrict__ rhs_matrix,
float* __restrict__ output_values,
cudaStream_t stream,
int batch_size) {
dim3 grid_dim(
std::ceil(static_cast<float>(m) / kBlockItemsY),
std::ceil(static_cast<float>(n) / kBlockItemsX),
batch_size);
dim3 block_dim(kBlockWidth, kBlockItemsY, 1);
sputnik::CudaSddmmKernel2<
LoadType,
kBlockItemsY,
kBlockItemsK,
kBlockItemsX,
kBlockWidth,
kPredicateK><<<grid_dim, block_dim, 0, stream>>>(
m,
k,
n,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
nonzeros);
return cudaGetLastError();
}
cudaError_t CudaSddmm2(
int m,
int k,
int n,
int nonzeros,
const int* __restrict__ row_indices,
const int* __restrict__ row_offsets,
const int* __restrict__ column_indices,
const float* __restrict__ lhs_matrix,
const float* __restrict__ rhs_matrix,
float* __restrict__ output_values,
cudaStream_t stream,
int batch_size) {
// If possible, launch a variant that does not include the k-dimension
// residue handling code.
if ((k % 4) == 0) {
if ((k % 32) == 0) {
return CudaSddmmEx2<float4, 4, 32, 32, 8, false>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
} else {
return CudaSddmmEx2<float4, 4, 32, 32, 8, true>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
}
} else if ((k % 2) == 0) {
return CudaSddmmEx2<float2, 2, 32, 32, 16, true>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
} else {
// Scalar kernel.
return CudaSddmmEx2<float, 1, 32, 32, 32, true>(
m,
k,
n,
nonzeros,
row_indices,
row_offsets,
column_indices,
lhs_matrix,
rhs_matrix,
output_values,
stream,
batch_size);
}
}
at::Tensor sddmm_sputnik(
const at::Tensor& a,
const at::Tensor& b,
const at::Tensor& row_indices,
const at::Tensor& row_offsets,
const at::Tensor& column_indices) {
TORCH_CHECK(a.dim() == b.dim());
TORCH_CHECK(a.dim() == 3);
TORCH_CHECK(a.size(0) == b.size(0));
TORCH_CHECK(a.size(2) == b.size(2));
TORCH_CHECK(row_indices.dim() == 1);
TORCH_CHECK(row_offsets.dim() == 1);
TORCH_CHECK(column_indices.dim() == 1);
TORCH_CHECK(a.is_cuda(), "a must be a CUDA tensor");
TORCH_CHECK(b.is_cuda(), "b must be a CUDA tensor");
TORCH_CHECK(row_indices.is_cuda(), "row_indices must be a CUDA tensor");
TORCH_CHECK(row_offsets.is_cuda(), "row_offsets must be a CUDA tensor");
TORCH_CHECK(column_indices.is_cuda(), "column_offsets must be a CUDA tensor");
TORCH_CHECK(a.is_contiguous(), "a must be a contiguous tensor");
TORCH_CHECK(b.is_contiguous(), "b must be a contiguous tensor");
TORCH_CHECK(
row_indices.is_contiguous(), "row_indices must be a contiguous tensor");
TORCH_CHECK(
row_offsets.is_contiguous(), "row_offsets must be a contiguous tensor");
TORCH_CHECK(
column_indices.is_contiguous(),
"column_offsets must be a contiguous tensor");
TORCH_CHECK(!a.is_sparse(), "a must be a dense tensor");
TORCH_CHECK(!b.is_sparse(), "b must be a dense tensor");
TORCH_CHECK(!row_indices.is_sparse(), "row_indices must be a dense tensor");
TORCH_CHECK(!row_offsets.is_sparse(), "row_offsets must be a dense tensor");
TORCH_CHECK(
!column_indices.is_sparse(), "column_offsets must be a dense tensor");
TORCH_CHECK(a.device() == b.device(), "a should be in the same device as b");
TORCH_CHECK(
a.device() == row_indices.device(),
"a should be in the same device as row_indices");
TORCH_CHECK(
a.device() == row_offsets.device(),
"a should be in the same device as row_offsets");
TORCH_CHECK(
a.device() == column_indices.device(),
"a should be in the same device as column_indices");
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int batch = a.size(0);
int m = a.size(1);
int k = a.size(2);
int n = b.size(1);
int nonzeros = column_indices.size(0);
at::Tensor output = at::empty({batch, nonzeros}, a.options());
AT_CUDA_CHECK(CudaSddmm2(
m,
k,
n,
nonzeros,
row_indices.data_ptr<int>(),
row_offsets.data_ptr<int>(),
column_indices.data_ptr<int>(),
a.data_ptr<float>(),
b.data_ptr<float>(),
output.data_ptr<float>(),
stream,
batch));
return output;
}
TORCH_LIBRARY_IMPL(xformers, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("xformers::sddmm_sputnik"), TORCH_FN(sddmm_sputnik));
}
|
d3499364b29db8b17c0973edaea051ff4a566e11.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Bprop2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
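// Benchmark harness: for each matrix size and block shape, run a few untimed
// warm-up launches, then time 1000 kernel launches and print
// [elapsed_us,(blockX,blockY),(XSIZE,YSIZE)].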
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *layer1 = NULL;
hipMalloc(&layer1, XSIZE*YSIZE*sizeof(float));
float *dsyn2 = NULL;
hipMalloc(&dsyn2, XSIZE*YSIZE*sizeof(float));
const float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
const float alpha = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(Bprop2, dim3(gridBlock), dim3(threadBlock), 0, 0, layer1, dsyn2, out, alpha);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(Bprop2, dim3(gridBlock), dim3(threadBlock), 0, 0, layer1, dsyn2, out, alpha);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(Bprop2, dim3(gridBlock), dim3(threadBlock), 0, 0, layer1, dsyn2, out, alpha);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d3499364b29db8b17c0973edaea051ff4a566e11.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Bprop2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *layer1 = NULL;
cudaMalloc(&layer1, XSIZE*YSIZE*sizeof(float));
float *dsyn2 = NULL;
cudaMalloc(&dsyn2, XSIZE*YSIZE*sizeof(float));
const float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
const float alpha = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
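// round the launch extents up to multiples of the block dimensions so the grid covers the whole matrix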
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Bprop2<<<gridBlock,threadBlock>>>(layer1,dsyn2,out,alpha);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Bprop2<<<gridBlock,threadBlock>>>(layer1,dsyn2,out,alpha);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Bprop2<<<gridBlock,threadBlock>>>(layer1,dsyn2,out,alpha);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
54d4e955b5003cea5df761a59e03ea82adc6fb86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
All rights reserved
*/
#include "assert.h"
#define ELEM_SWAP(a,b) { int t=(a);(a)=(b);(b)=t; }
__device__
float quick_select_float(const float* energy, int *index, int nelems, int select) {
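    // Hoare-style quickselect over the index array: partially reorders `index` so that
    // index[select] refers to the (select+1)-th smallest energy, then returns that energy.
    // The energy array itself is never modified.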
int low, high, middle, ll, hh;
low = 0;
high = nelems - 1;
for (;;) {
if (high <= low) { /* One element only */
return energy[index[select]];
}
if (high == low + 1) { /* Two elements only */
if (energy[index[low]] > energy[index[high]])
ELEM_SWAP(index[low], index[high]);
return energy[index[select]];
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (energy[index[middle]] > energy[index[high]]) ELEM_SWAP(index[middle], index[high]);
if (energy[index[low]] > energy[index[high]]) ELEM_SWAP(index[low], index[high]);
if (energy[index[middle]] > energy[index[low]]) ELEM_SWAP(index[middle], index[low]);
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(index[middle], index[low+1]);
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (energy[index[low]] > energy[index[ll]]);
do hh--; while (energy[index[hh]] > energy[index[low]]);
if (hh < ll)
break;
ELEM_SWAP(index[ll], index[hh]);
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(index[low], index[hh]);
/* Re-set active partition */
if (hh <= select)
low = ll;
if (hh >= select)
high = hh - 1;
}
}
#undef ELEM_SWAP
__device__ void computeTorsionAngle(const real4* __restrict__ posq, int atom_i, int atom_j, int atom_k, int atom_l,
float3& r_ij, float3& r_kj, float3& r_kl, float3& m, float3& n,
float& len_r_kj, float& len_m, float& len_n, float& phi) {
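    // Returns the dihedral angle i-j-k-l in degrees (range [-180, 180]) using the atan2
    // form, which stays well-conditioned near 0 and 180 degrees. The intermediate vectors
    // and lengths are passed back so the force routine can reuse them.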
// compute vectors
r_ij = trimTo3(posq[atom_j] - posq[atom_i]);
r_kj = trimTo3(posq[atom_j] - posq[atom_k]);
r_kl = trimTo3(posq[atom_l] - posq[atom_k]);
// compute normal vectors
m = cross(r_ij, r_kj);
n = cross(r_kj, r_kl);
// compute lengths
len_r_kj = sqrt(dot(r_kj, r_kj));
len_m = sqrt(dot(m, m));
len_n = sqrt(dot(n, n));
// compute angle phi
float x = dot(m / len_m, n / len_n);
float y = dot(cross(m / len_m, r_kj / len_r_kj), n / len_n);
phi = atan2(y, x) * 180. / 3.141592654;
}
__device__ void computeTorsionForce(const float dEdPhi, const float3& r_ij, const float3& r_kj, const float3& r_kl,
const float3& m, const float3& n, const float len_r_kj, const float len_m, const float len_n,
float3& F_i, float3& F_j, float3& F_k, float3& F_l) {
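    // Standard analytic dihedral forces: the 180/pi factor converts dE/dphi from
    // per-degree to per-radian, and F_j, F_k are constructed so the four forces sum to zero.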
F_i = -180. / 3.141592654 * dEdPhi * len_r_kj * m / (len_m * len_m);
F_l = 180. / 3.141592654 * dEdPhi * len_r_kj * n / (len_n * len_n);
F_j = -F_i + dot(r_ij, r_kj) / (len_r_kj * len_r_kj) * F_i - dot(r_kl, r_kj) / (len_r_kj * len_r_kj) * F_l;
F_k = -F_l - dot(r_ij, r_kj) / (len_r_kj * len_r_kj) * F_i + dot(r_kl, r_kj) / (len_r_kj * len_r_kj) * F_l;
}
extern "C" __global__ void computeDistRest(
const real4* __restrict__ posq, // positions and charges
const int2* __restrict__ atomIndices, // pair of atom indices
const float4* __restrict__ distanceBounds, // r1, r2, r3, r4
const float* __restrict__ forceConstants, // k
int* __restrict__ indexToGlobal, // array of indices into global arrays
float* __restrict__ energies, // global array of restraint energies
float3* __restrict__ forceBuffer, // temporary buffer to hold the force
const int numRestraints) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=blockDim.x*gridDim.x) {
// get my global index
const int globalIndex = indexToGlobal[index];
// get the distances
const float r1 = distanceBounds[index].x;
const float r2 = distanceBounds[index].y;
const float r3 = distanceBounds[index].z;
const float r4 = distanceBounds[index].w;
// get the force constant
const float k = forceConstants[index];
// get atom indices and compute distance
int atomIndexA = atomIndices[index].x;
int atomIndexB = atomIndices[index].y;
real4 delta = posq[atomIndexA] - posq[atomIndexB];
real distSquared = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real r = SQRT(distSquared);
// compute force and energy
float energy = 0.0;
float dEdR = 0.0;
float diff = 0.0;
float diff2 = 0.0;
float3 f;
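        // Flat-bottom restraint: zero in [r2, r3], harmonic (0.5*k*d^2) in [r1, r2] and
        // [r3, r4], and linear beyond r1 and r4 so energy and gradient remain continuous.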
if(r < r1) {
energy = k * (r - r1) * (r1 - r2) + 0.5 * k * (r1 - r2) * (r1 - r2);
dEdR = k * (r1 - r2);
}
else if(r < r2) {
diff = r - r2;
diff2 = diff * diff;
energy = 0.5 * k * diff2;
dEdR = k * diff;
}
else if(r < r3) {
dEdR = 0.0;
energy = 0.0;
}
else if(r < r4) {
diff = r - r3;
diff2 = diff * diff;
energy = 0.5 * k * diff2;
dEdR = k * diff;
}
else {
energy = k * (r - r4) * (r4 - r3) + 0.5 * k * (r4 - r3) * (r4 - r3);
dEdR = k * (r4 - r3);
}
assert(isfinite(energy));
// store force into local buffer
if (r > 0) {
f.x = delta.x * dEdR / r;
f.y = delta.y * dEdR / r;
f.z = delta.z * dEdR / r;
} else {
f.x = 0.0;
f.y = 0.0;
f.z = 0.0;
}
forceBuffer[index] = f;
// store energy into global buffer
energies[globalIndex] = energy;
}
}
extern "C" __global__ void computeHyperbolicDistRest(
const real4* __restrict__ posq, // positions and charges
const int2* __restrict__ atomIndices, // pair of atom indices
const float4* __restrict__ distanceBounds, // r1, r2, r3, r4
const float4* __restrict__ params, // k1, k2, a, b
int* __restrict__ indexToGlobal, // array of indices into global arrays
float* __restrict__ energies, // global array of restraint energies
float3* __restrict__ forceBuffer, // temporary buffer to hold the force
const int numRestraints) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=blockDim.x*gridDim.x) {
// get my global index
const int globalIndex = indexToGlobal[index];
// get the distances
const float r1 = distanceBounds[index].x;
const float r2 = distanceBounds[index].y;
const float r3 = distanceBounds[index].z;
const float r4 = distanceBounds[index].w;
// get the parameters
const float k1 = params[index].x;
const float k2 = params[index].y;
const float a = params[index].z;
const float b = params[index].w;
// get atom indices and compute distance
int atomIndexA = atomIndices[index].x;
int atomIndexB = atomIndices[index].y;
real4 delta = posq[atomIndexA] - posq[atomIndexB];
real distSquared = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real r = SQRT(distSquared);
// compute force and energy
float energy = 0.0;
float dEdR = 0.0;
float diff = 0.0;
float diff2 = 0.0;
float3 f;
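        // Same flat-bottom form as computeDistRest, except that beyond r4 the energy follows
        // a hyperbola that flattens toward 0.5*k2*a, so the force decays as 1/(r - r3)^2
        // instead of staying constant.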
if(r < r1) {
energy = k1 * (r - r1) * (r1 - r2) + 0.5 * k1 * (r1 - r2) * (r1 - r2);
dEdR = k1 * (r1 - r2);
}
else if(r < r2) {
diff = r - r2;
diff2 = diff * diff;
energy = 0.5 * k1 * diff2;
dEdR = k1 * diff;
}
else if(r < r3) {
dEdR = 0.0;
energy = 0.0;
}
else if(r < r4) {
diff = r - r3;
diff2 = diff * diff;
energy = 0.5 * k2 * diff2;
dEdR = k2 * diff;
}
else {
energy = 0.5 * k2 * (b / (r - r3) + a);
dEdR = -0.5 * b * k2 / (r - r3) / (r - r3);
}
// store force into local buffer
if (r > 0) {
f.x = delta.x * dEdR / r;
f.y = delta.y * dEdR / r;
f.z = delta.z * dEdR / r;
} else {
f.x = 0.0;
f.y = 0.0;
f.z = 0.0;
}
forceBuffer[index] = f;
assert(isfinite(energy));
// store energy into global buffer
energies[globalIndex] = energy;
}
}
extern "C" __global__ void computeTorsionRest(
const real4* __restrict__ posq, // positions and charges
const int4* __restrict__ atomIndices, // indices of atom_{i,j,k,l}
const float3* __restrict__ params, // phi, deltaPhi, forceConstant
int* __restrict__ indexToGlobal, // array of indices into global arrays
float* __restrict__ energies, // global array of restraint energies
float3* __restrict__ forceBuffer, // temporary buffer to hold the force
// forceBuffer[index*4] -> atom_i
// forceBuffer[index*4 + 3] -> atom_l
const int numRestraints) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=gridDim.x*blockDim.x) {
// get my global index
int globalIndex = indexToGlobal[index];
// get the atom indices
int4 indices = atomIndices[index];
int atom_i = indices.x;
int atom_j = indices.y;
int atom_k = indices.z;
int atom_l = indices.w;
// compute the angle and related quantities
float3 r_ij, r_kj, r_kl;
float3 m, n;
float len_r_kj;
float len_m;
float len_n;
float phi;
computeTorsionAngle(posq, atom_i, atom_j, atom_k, atom_l,
r_ij, r_kj, r_kl, m, n, len_r_kj, len_m, len_n, phi);
// compute E and dE/dphi
float phiEquil = params[index].x;
float phiDelta = params[index].y;
float forceConst = params[index].z;
float phiDiff = phi - phiEquil;
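        // wrap the periodic difference into [-180, 180] degrees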
if (phiDiff < -180.) {
phiDiff += 360.;
} else if (phiDiff > 180.) {
phiDiff -= 360.;
}
float energy = 0.0;
float dEdPhi = 0.0;
if (phiDiff < -phiDelta) {
energy = 0.5 * forceConst * (phiDiff + phiDelta) * (phiDiff + phiDelta);
dEdPhi = forceConst * (phiDiff + phiDelta);
}
else if(phiDiff > phiDelta) {
energy = 0.5 * forceConst * (phiDiff - phiDelta) * (phiDiff - phiDelta);
dEdPhi = forceConst * (phiDiff - phiDelta);
}
else{
energy = 0.0;
dEdPhi = 0.0;
}
assert(isfinite(energy));
energies[globalIndex] = energy;
computeTorsionForce(dEdPhi, r_ij, r_kj, r_kl, m, n, len_r_kj, len_m, len_n,
forceBuffer[4 * index + 0], forceBuffer[4 * index + 1],
forceBuffer[4 * index + 2], forceBuffer[4 * index + 3]);
}
}
extern "C" __global__ void computeDistProfileRest(
const real4* __restrict__ posq, // positions and charges
const int2* __restrict__ atomIndices, // pair of atom indices
const float2* __restrict__ distRanges, // upper and lower bounds of spline
const int* __restrict__ nBins, // number of bins
const float4* __restrict__ splineParams, // a0, a1, a2, a3
const int2* __restrict__ paramBounds, // upper and lower bounds for each spline
const float* __restrict__ scaleFactor, // scale factor for energies and forces
const int* __restrict__ indexToGlobal, // index of this restraint in the global array
float* __restrict__ restraintEnergies, // global energy of each restraint
float3* __restrict__ restraintForce, // cache the forces for application later
const int numRestraints ) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=blockDim.x*gridDim.x) {
// get my global index
int globalIndex = indexToGlobal[index];
// get atom indices and compute distance
int atomIndexA = atomIndices[index].x;
int atomIndexB = atomIndices[index].y;
real4 delta = posq[atomIndexA] - posq[atomIndexB];
real distSquared = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real r = SQRT(distSquared);
// compute bin
int bin = (int)( floor((r - distRanges[index].x) / (distRanges[index].y - distRanges[index].x) * nBins[index]) );
// compute the force and energy
float energy = 0.0;
float dEdR = 0.0;
float binWidth = (distRanges[index].y - distRanges[index].x) / nBins[index];
if (bin < 0){
energy = scaleFactor[index] * splineParams[paramBounds[index].x].x;
}
else if (bin >= nBins[index]) {
energy = scaleFactor[index] * (splineParams[paramBounds[index].y - 1].x +
splineParams[paramBounds[index].y - 1].y +
splineParams[paramBounds[index].y - 1].z +
splineParams[paramBounds[index].y - 1].w);
}
else {
float t = (r - bin * binWidth + distRanges[index].x) / binWidth;
float a0 = splineParams[ paramBounds[index].x + bin ].x;
float a1 = splineParams[ paramBounds[index].x + bin ].y;
float a2 = splineParams[ paramBounds[index].x + bin ].z;
float a3 = splineParams[ paramBounds[index].x + bin ].w;
energy = scaleFactor[index] * (a0 + a1 * t + a2 * t * t + a3 * t * t * t);
dEdR = scaleFactor[index] * (a1 + 2.0 * a2 * t + 3.0 * a3 * t * t) / binWidth;
}
assert(isfinite(energy));
// store force into local buffer
float3 f;
f.x = delta.x * dEdR / r;
f.y = delta.y * dEdR / r;
f.z = delta.z * dEdR / r;
restraintForce[index] = f;
// store energy into global buffer
restraintEnergies[globalIndex] = energy;
}
}
extern "C" __global__ void computeTorsProfileRest(
const real4* __restrict__ posq, // positions and charges
const int4* __restrict__ atomIndices0, // i,j,k,l for torsion 0
const int4* __restrict__ atomIndices1, // i,j,k,l for torsion 1
const int* __restrict__ nBins, // number of bins
const float4* __restrict__ params0, // a0 - a3
const float4* __restrict__ params1, // a4 - a7
const float4* __restrict__ params2, // a8 - a11
const float4* __restrict__ params3, // a12 - a15
const int2* __restrict__ paramBounds, // upper and lower bounds for each spline
const float* __restrict__ scaleFactor, // scale factor for energies and forces
const int* __restrict__ indexToGlobal, // index of this restraint in the global array
float* __restrict__ restraintEnergies, // global energy of each restraint
float3* __restrict__ forceBuffer, // cache the forces for application later
const int numRestraints ) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=gridDim.x*blockDim.x) {
// get my global index
int globalIndex = indexToGlobal[index];
// compute phi
int phi_atom_i = atomIndices0[index].x;
int phi_atom_j = atomIndices0[index].y;
int phi_atom_k = atomIndices0[index].z;
int phi_atom_l = atomIndices0[index].w;
float3 phi_r_ij, phi_r_kj, phi_r_kl;
float3 phi_m, phi_n;
float phi_len_r_kj;
float phi_len_m;
float phi_len_n;
float phi;
computeTorsionAngle(posq, phi_atom_i, phi_atom_j, phi_atom_k, phi_atom_l,
phi_r_ij, phi_r_kj, phi_r_kl, phi_m, phi_n, phi_len_r_kj, phi_len_m, phi_len_n, phi);
// compute psi
int psi_atom_i = atomIndices1[index].x;
int psi_atom_j = atomIndices1[index].y;
int psi_atom_k = atomIndices1[index].z;
int psi_atom_l = atomIndices1[index].w;
float3 psi_r_ij, psi_r_kj, psi_r_kl;
float3 psi_m, psi_n;
float psi_len_r_kj;
float psi_len_m;
float psi_len_n;
float psi;
computeTorsionAngle(posq, psi_atom_i, psi_atom_j, psi_atom_k, psi_atom_l,
psi_r_ij, psi_r_kj, psi_r_kl, psi_m, psi_n, psi_len_r_kj, psi_len_m, psi_len_n, psi);
// compute bin indices
int i = (int)(floor((phi + 180.)/360. * nBins[index]));
int j = (int)(floor((psi + 180.)/360. * nBins[index]));
if (i >= nBins[index]) {
i = 0;
phi -= 360.;
}
if (i < 0) {
i = nBins[index] - 1;
phi += 360.;
}
if (j >= nBins[index]) {
j = 0;
psi -= 360.;
}
if (j < 0) {
j = nBins[index] - 1;
psi += 360.;
}
float delta = 360. / nBins[index];
float u = (phi - i * delta + 180.) / delta;
float v = (psi - j * delta + 180.) / delta;
int pi = paramBounds[index].x + i * nBins[index] + j;
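        // Bicubic patch: params0..params3 hold the 16 spline coefficients (params_i carries
        // the u^i terms, with .x/.y/.z/.w multiplying v^0..v^3); the energy and its u/v
        // partial derivatives are evaluated below.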
float energy = params0[pi].x + params0[pi].y * v + params0[pi].z * v*v + params0[pi].w * v*v*v +
params1[pi].x * u + params1[pi].y * u*v + params1[pi].z * u*v*v + params1[pi].w * u*v*v*v +
params2[pi].x * u*u + params2[pi].y * u*u*v + params2[pi].z * u*u*v*v + params2[pi].w * u*u*v*v*v +
params3[pi].x * u*u*u + params3[pi].y * u*u*u*v + params3[pi].z * u*u*u*v*v + params3[pi].w * u*u*u*v*v*v;
energy = energy * scaleFactor[index];
assert(isfinite(energy));
float dEdPhi = params1[pi].x + params1[pi].y * v + params1[pi].z * v*v + params1[pi].w * v*v*v +
params2[pi].x * 2*u + params2[pi].y * 2*u*v + params2[pi].z * 2*u*v*v + params2[pi].w * 2*u*v*v*v +
params3[pi].x * 3*u*u + params3[pi].y * 3*u*u*v + params3[pi].z * 3*u*u*v*v + params3[pi].w * 3*u*u*v*v*v;
dEdPhi = dEdPhi * scaleFactor[index] / delta;
float dEdPsi = params0[pi].y + params0[pi].z * 2*v + params0[pi].w * 3*v*v +
params1[pi].y * u + params1[pi].z * u*2*v + params1[pi].w * u*3*v*v +
params2[pi].y * u*u + params2[pi].z * u*u*2*v + params2[pi].w * u*u*3*v*v +
params3[pi].y * u*u*u + params3[pi].z * u*u*u*2*v + params3[pi].w * u*u*u*3*v*v;
dEdPsi = dEdPsi * scaleFactor[index] / delta;
restraintEnergies[globalIndex] = energy;
computeTorsionForce(dEdPhi, phi_r_ij, phi_r_kj, phi_r_kl, phi_m, phi_n, phi_len_r_kj, phi_len_m, phi_len_n,
forceBuffer[8 * index + 0], forceBuffer[8 * index + 1],
forceBuffer[8 * index + 2], forceBuffer[8 * index + 3]);
computeTorsionForce(dEdPsi, psi_r_ij, psi_r_kj, psi_r_kl, psi_m, psi_n, psi_len_r_kj, psi_len_m, psi_len_n,
forceBuffer[8 * index + 4], forceBuffer[8 * index + 5],
forceBuffer[8 * index + 6], forceBuffer[8 * index + 7]);
}
}
extern "C" __global__ void evaluateAndActivate(
const int numGroups,
const int* __restrict__ numActiveArray,
const int2* __restrict__ boundsArray,
const int* __restrict__ pristineIndexArray,
int* __restrict__ tempIndexArray,
const float* __restrict__ energyArray,
float* __restrict__ activeArray,
float* __restrict__ targetEnergyArray)
{
// This kernel computes which restraints are active within each group.
// It uses "warp-level" programming to do this, where each warp within
// a threadblock computes the results for a single group. All threads
    // within each group are implicitly synchronized at the hardware
    // level.
    // These are runtime parameters set by the C++ code.
const int groupsPerBlock = GROUPSPERBLOCK;
const int maxGroupSize = MAXGROUPSIZE;
// Because each warp is computing a separate interaction, we need to
// keep track of which block we are acting on and our index within
// that warp.
const int groupOffsetInBlock = threadIdx.x / 32;
const int threadOffsetInWarp = threadIdx.x % 32;
// We store the energies and indices into scratch buffers. These scratch
// buffers are also used for reductions within each warp.
extern __shared__ volatile char scratch[];
volatile float* warpScratchEnergy = (float*)&scratch[groupOffsetInBlock*maxGroupSize*(sizeof(float)+sizeof(int))];
volatile int* warpScratchIndices = (int*)&scratch[groupOffsetInBlock*maxGroupSize*(sizeof(float)+sizeof(int)) +
maxGroupSize*sizeof(float)];
volatile float* warpReductionBuffer = (float*)&scratch[groupOffsetInBlock*32*sizeof(float)];
// each warp loads the energies and indices for a group
for (int groupIndex=groupsPerBlock*blockIdx.x+groupOffsetInBlock; groupIndex<numGroups; groupIndex+=groupsPerBlock*gridDim.x) {
const int numActive = numActiveArray[groupIndex];
const int start = boundsArray[groupIndex].x;
const int end = boundsArray[groupIndex].y;
const int length = end - start;
const bool applyAll = (numActive == length);
// copy the energies to shared memory and setup indices
if (!applyAll) {
for(int i=threadOffsetInWarp; i<length; i+=32) {
const float energy = energyArray[pristineIndexArray[i + start]];
assert(isfinite(energy));
warpScratchIndices[i] = i;
warpScratchEnergy[i] = energy;
}
}
// now, we run the quick select algorithm.
// this is not parallelized, so we only run it on one thread
// per block.
if (threadOffsetInWarp==0) {
float energyCut = 0.0;
if (!applyAll) {
energyCut = quick_select_float((const float*)warpScratchEnergy, (int *)warpScratchIndices, length, numActive-1);
}
else {
energyCut = 9.99e99;
}
warpScratchEnergy[0] = energyCut;
}
// now we're back on all threads again
float energyCut = warpScratchEnergy[0];
float thisActive = 0.0;
float thisEnergy = 0.0;
// we are going to start writing to warpReductionBuffer,
// which may overlap with the warpScratch* buffers, so
// we need to make sure that all threads are done first.
__syncthreads();
// reset the reduction buffers to zero
warpReductionBuffer[threadOffsetInWarp] = 0.0;
// sum up the energy for each restraint
for(int i=threadOffsetInWarp+start; i<end; i+=32) {
thisEnergy = energyArray[pristineIndexArray[i]];
thisActive = (float)(thisEnergy <= energyCut);
activeArray[pristineIndexArray[i]] = thisActive;
warpReductionBuffer[threadOffsetInWarp] += thisActive * thisEnergy;
}
// now we do a parallel reduction within each warp
int totalThreads = 32;
int index2 = 0;
while (totalThreads > 1) {
int halfPoint = (totalThreads >> 1);
if (threadOffsetInWarp < halfPoint) {
index2 = threadOffsetInWarp + halfPoint;
warpReductionBuffer[threadOffsetInWarp] += warpReductionBuffer[index2];
}
totalThreads = halfPoint;
}
// now store the energy for this group
if (threadOffsetInWarp == 0) {
targetEnergyArray[groupIndex] = warpReductionBuffer[0];
assert(isfinite(warpReductionBuffer[0]));
}
// make sure we're all done before we start again
__syncthreads();
}
}
__device__ void findMinMax(int length, const float* energyArray, float* minBuffer, float* maxBuffer) {
const int tid = threadIdx.x;
float energy;
float min = 9.9e99;
float max = -9.9e99;
    // Each thread computes the min and max for its energies and stores them in the buffers
for (int i=tid; i<length; i+=blockDim.x) {
energy = energyArray[i];
if (energy < min) {
min = energy;
}
if (energy > max) {
max = energy;
}
}
minBuffer[tid] = min;
maxBuffer[tid] = max;
__syncthreads();
// Now we do a parallel reduction
int totalThreads = blockDim.x;
int index2 = 0;
float temp = 0;
while (totalThreads > 1) {
int halfPoint = (totalThreads >> 1);
if (tid < halfPoint) {
index2 = tid + halfPoint;
temp = minBuffer[index2];
if (temp < minBuffer[tid]) {
minBuffer[tid] = temp;
}
temp = maxBuffer[index2];
if (temp > maxBuffer[tid]) {
maxBuffer[tid] = temp;
}
}
__syncthreads();
totalThreads = halfPoint;
}
__syncthreads();
}
extern "C" __global__ void evaluateAndActivateCollections(
const int numCollections,
const int* __restrict__ numActiveArray,
const int2* __restrict__ boundsArray,
const int* __restrict__ indexArray,
const float* __restrict__ energyArray,
float* __restrict__ activeArray)
{
const float TOLERANCE = 1e-4;
const int maxCollectionSize = MAXCOLLECTIONSIZE;
const int tid = threadIdx.x;
const int warp = tid / 32;
const int lane = tid % 32; // which thread are we within this warp
// shared memory:
// energyBuffer: maxCollectionSize floats
    // min/max Buffer: blockDim.x floats each
    // binCounts: blockDim.x ints
extern __shared__ char collectionScratch[];
float* energyBuffer = (float*)&collectionScratch[0];
float* minBuffer = (float*)&collectionScratch[maxCollectionSize*sizeof(float)];
float* maxBuffer = (float*)&collectionScratch[(maxCollectionSize+blockDim.x)*sizeof(float)];
int* binCounts = (int*)&collectionScratch[(maxCollectionSize+2*blockDim.x)*sizeof(float)];
int* bestBin = (int*)&(collectionScratch[(maxCollectionSize + 2 * blockDim.x) * sizeof(float) +
blockDim.x * sizeof(int)]);
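    // bestBin is a single int at the end of the scratch area; it records which bin
    // brackets the k'th lowest energy.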
for (int collIndex=blockIdx.x; collIndex<numCollections; collIndex+=gridDim.x) {
// we need to find the value of the cutoff energy below, then we will
// activate all groups with lower energy
float energyCutoff = 0.0;
int numActive = numActiveArray[collIndex];
int start = boundsArray[collIndex].x;
int end = boundsArray[collIndex].y;
int length = end - start;
// load the energy buffer for this collection
for (int i=tid; i<length; i+=blockDim.x) {
const float energy = energyArray[indexArray[start + i]];
assert(isfinite(energy));
energyBuffer[i] = energy;
}
__syncthreads();
findMinMax(length, energyBuffer, minBuffer, maxBuffer);
float min = minBuffer[0];
float max = maxBuffer[0];
float delta = max - min;
// All of the energies are the same, so they should all be active.
// Note: we need to break out here in this case, as otherwise delta
// will be zero and bad things will happen
if (fabs(max-min) < TOLERANCE) {
energyCutoff = max;
} else {
// Here we need to find the k'th highest energy. We do this using a recursive,
// binning and counting strategy. We divide the interval (min, max) into blockDim.x
// bins. We assign each energy to a bin, increment the count, and update
// the min and max. Then, we find the bin that contains the k'th lowest energy. If
// min==max for this bin, then we are done. Otherwise, we set the new (min, max) for
// the bins and recompute, assigning energies less than min to bin 0.
// loop until we break out at convergence
for (;;) {
/*if(tid==0) {*/
/*printf("%d\t%f\t%f\n", collIndex, min, max);*/
/*if (!isfinite(min) || !isfinite(max)) {*/
/*asm("trap;");*/
/*}*/
/*}*/
// zero out the buffers
binCounts[tid] = 0;
minBuffer[tid] = 9.0e99;
maxBuffer[tid] = 0.0;
__syncthreads();
// loop over all energies
for (int i=tid; i<length; i+=blockDim.x) {
float energy = energyBuffer[i];
// compute which bin this energy lies in
int index = float2int(floorf((blockDim.x-1) / delta * (energy - min)));
// we only count entries that lie within min and max
if ( (index >= 0) && (index < blockDim.x) ) {
// increment the counter using atomic function
atomicAdd(&binCounts[index], 1);
// update the min and max bounds for the bin using atomic functions
// note we need to cast to an integer, but floating point values
// still compare correctly when represented as integers
// this assumes that all energies are >0
atomicMin((unsigned int*)&minBuffer[index], __float_as_int(energy));
atomicMax((unsigned int*)&maxBuffer[index], __float_as_int(energy));
}
}
// make sure all threads are done
__syncthreads();
// now we need to do a cumulative sum, also known as an inclusive scan
// we will do this using a fast three-phase parallel algorithm
// this code assumes 1024 threads in 32 warps of 32 threads
// it will require modification to work with arbitrary sizes
// first, we do the cumulative sum within each warp
            // this works because the threads are all implicitly synchronized
if (lane >= 1) binCounts[tid] += binCounts[tid - 1];
if (lane >= 2) binCounts[tid] += binCounts[tid - 2];
if (lane >= 4) binCounts[tid] += binCounts[tid - 4];
if (lane >= 8) binCounts[tid] += binCounts[tid - 8];
if (lane >= 16) binCounts[tid] += binCounts[tid - 16];
__syncthreads();
// now we use a single thread to do a cumulative sum over the last elements of each
// of the 32 warps
if (warp == 0) {
if (lane >= 1) binCounts[32 * tid + 31] += binCounts[32 * (tid - 1) + 31];
if (lane >= 2) binCounts[32 * tid + 31] += binCounts[32 * (tid - 2) + 31];
if (lane >= 4) binCounts[32 * tid + 31] += binCounts[32 * (tid - 4) + 31];
if (lane >= 8) binCounts[32 * tid + 31] += binCounts[32 * (tid - 8) + 31];
if (lane >= 16) binCounts[32 * tid + 31] += binCounts[32 * (tid - 16) + 31];
}
__syncthreads();
            // now each warp adds the value of the 31st element of the previous warp
// there is nothing to add for warp0, so we skip it
// the last element of each warp already has this sum from the previous step,
// so we skip it
if (warp>0 && lane<31) {
binCounts[tid] += binCounts[32 * warp - 1];
}
__syncthreads();
// now we need to find the bin containing the k'th highest value
// we use a single warp, where each thread looks at a block of 32 entries
// to find the smallest index where the cumulative sum is >= numActive
// we set flag if we find one
// this section uses implicit synchronization between threads in a single warp
if (warp == 0) {
int counter = 0;
int flag = false;
for (counter=0; counter<32; counter++) {
if (binCounts[32 * tid + counter] >= numActive) {
flag = true;
break;
}
}
// now find the smallest bin that meets the criteria
if (tid == 0) {
*bestBin = 1025;
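                    // 1025 is a sentinel larger than any valid bin index (blockDim.x is at most 1024)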
}
// if we found a value >= numActive, then update the minimum value
if (flag) {
atomicMin(bestBin, 32 * tid + counter);
}
}
__syncthreads();
const float binMin = minBuffer[*bestBin];
const float binMax = maxBuffer[*bestBin];
// if all energies in this bin are the same, then we are done
if (fabs(binMin-binMax) < TOLERANCE) {
energyCutoff = binMax;
break;
}
// if this bin ends exactly on the k'th lowest energy, then we are done
if (binCounts[*bestBin] == numActive) {
energyCutoff = binMax;
break;
}
// otherwise, the correct value lies somewhere within this bin
// it will between binMin and binMax and we need to find the
// binCounts[*bestBin] - numActive 'th element
// we loop through again searching with these updated parameters
min = binMin;
max = binMax;
delta = max - min;
numActive = binCounts[*bestBin] - numActive;
__syncthreads();
}
}
// now we know the energyCutoff, so apply it to each group
for (int i=tid; i<length; i+=blockDim.x) {
if (energyBuffer[i] <= energyCutoff) {
activeArray[indexArray[i + start]] = 1.0;
}
else {
activeArray[indexArray[i + start]] = 0.0;
}
}
__syncthreads();
}
}
extern "C" __global__ void applyGroups(
float* __restrict__ groupActive,
float* __restrict__ restraintActive,
const int2* __restrict__ bounds,
int numGroups) {
for (int groupIndex=blockIdx.x; groupIndex<numGroups; groupIndex+=gridDim.x) {
float active = groupActive[groupIndex];
for (int i=bounds[groupIndex].x + threadIdx.x; i<bounds[groupIndex].y; i+=blockDim.x) {
restraintActive[i] *= active;
}
}
}
extern "C" __global__ void applyDistRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int2* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numDistRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numDistRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int index1 = atomIndices[restraintIndex].x;
int index2 = atomIndices[restraintIndex].y;
energyAccum += globalEnergies[globalIndex];
float3 f = restForces[restraintIndex];
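            // Forces accumulate in 64-bit fixed point: each component is scaled by 2^32 and
            // atomically added into the long long force buffer, with the x, y and z components
            // stored in consecutive blocks of PADDED_NUM_ATOMS entries. The two atoms receive
            // equal and opposite contributions.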
atomicAdd(&force[index1], static_cast<unsigned long long>((long long) (-f.x*0x100000000)));
atomicAdd(&force[index1 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.y*0x100000000)));
atomicAdd(&force[index1 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.z*0x100000000)));
atomicAdd(&force[index2], static_cast<unsigned long long>((long long) (f.x*0x100000000)));
atomicAdd(&force[index2 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.y*0x100000000)));
atomicAdd(&force[index2 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyHyperbolicDistRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int2* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numDistRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numDistRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int index1 = atomIndices[restraintIndex].x;
int index2 = atomIndices[restraintIndex].y;
energyAccum += globalEnergies[globalIndex];
float3 f = restForces[restraintIndex];
atomicAdd(&force[index1], static_cast<unsigned long long>((long long) (-f.x*0x100000000)));
atomicAdd(&force[index1 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.y*0x100000000)));
atomicAdd(&force[index1 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.z*0x100000000)));
atomicAdd(&force[index2], static_cast<unsigned long long>((long long) (f.x*0x100000000)));
atomicAdd(&force[index2 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.y*0x100000000)));
atomicAdd(&force[index2 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyTorsionRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int4* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int atom_i = atomIndices[restraintIndex].x;
int atom_j = atomIndices[restraintIndex].y;
int atom_k = atomIndices[restraintIndex].z;
int atom_l = atomIndices[restraintIndex].w;
energyAccum += globalEnergies[globalIndex];
// update forces
float3 f_i = restForces[restraintIndex * 4 + 0];
float3 f_j = restForces[restraintIndex * 4 + 1];
float3 f_k = restForces[restraintIndex * 4 + 2];
float3 f_l = restForces[restraintIndex * 4 + 3];
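            // accumulate the four per-atom forces using the same 2^32 fixed-point convention as applyDistRest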
atomicAdd(&force[atom_i], static_cast<unsigned long long>((long long) (f_i.x*0x100000000)));
atomicAdd(&force[atom_i + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_i.y*0x100000000)));
atomicAdd(&force[atom_i + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_i.z*0x100000000)));
atomicAdd(&force[atom_j], static_cast<unsigned long long>((long long) (f_j.x*0x100000000)));
atomicAdd(&force[atom_j + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_j.y*0x100000000)));
atomicAdd(&force[atom_j + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_j.z*0x100000000)));
atomicAdd(&force[atom_k], static_cast<unsigned long long>((long long) (f_k.x*0x100000000)));
atomicAdd(&force[atom_k + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_k.y*0x100000000)));
atomicAdd(&force[atom_k + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_k.z*0x100000000)));
atomicAdd(&force[atom_l], static_cast<unsigned long long>((long long) (f_l.x*0x100000000)));
atomicAdd(&force[atom_l + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_l.y*0x100000000)));
atomicAdd(&force[atom_l + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_l.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyDistProfileRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int2* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int index1 = atomIndices[restraintIndex].x;
int index2 = atomIndices[restraintIndex].y;
energyAccum += globalEnergies[globalIndex];
float3 f = restForces[restraintIndex];
atomicAdd(&force[index1], static_cast<unsigned long long>((long long) (-f.x*0x100000000)));
atomicAdd(&force[index1 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.y*0x100000000)));
atomicAdd(&force[index1 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.z*0x100000000)));
atomicAdd(&force[index2], static_cast<unsigned long long>((long long) (f.x*0x100000000)));
atomicAdd(&force[index2 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.y*0x100000000)));
atomicAdd(&force[index2 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyTorsProfileRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int4* __restrict__ atomIndices0,
const int4* __restrict__ atomIndices1,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
// update energy
energyAccum += globalEnergies[globalIndex];
// update phi
int phi_atom_i = atomIndices0[restraintIndex].x;
int phi_atom_j = atomIndices0[restraintIndex].y;
int phi_atom_k = atomIndices0[restraintIndex].z;
int phi_atom_l = atomIndices0[restraintIndex].w;
// update forces
float3 phi_f_i = restForces[restraintIndex * 8 + 0];
float3 phi_f_j = restForces[restraintIndex * 8 + 1];
float3 phi_f_k = restForces[restraintIndex * 8 + 2];
float3 phi_f_l = restForces[restraintIndex * 8 + 3];
atomicAdd(&force[phi_atom_i], static_cast<unsigned long long>((long long) (phi_f_i.x*0x100000000)));
atomicAdd(&force[phi_atom_i + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_i.y*0x100000000)));
atomicAdd(&force[phi_atom_i + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_i.z*0x100000000)));
atomicAdd(&force[phi_atom_j], static_cast<unsigned long long>((long long) (phi_f_j.x*0x100000000)));
atomicAdd(&force[phi_atom_j + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_j.y*0x100000000)));
atomicAdd(&force[phi_atom_j + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_j.z*0x100000000)));
atomicAdd(&force[phi_atom_k], static_cast<unsigned long long>((long long) (phi_f_k.x*0x100000000)));
atomicAdd(&force[phi_atom_k + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_k.y*0x100000000)));
atomicAdd(&force[phi_atom_k + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_k.z*0x100000000)));
atomicAdd(&force[phi_atom_l], static_cast<unsigned long long>((long long) (phi_f_l.x*0x100000000)));
atomicAdd(&force[phi_atom_l + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_l.y*0x100000000)));
atomicAdd(&force[phi_atom_l + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_l.z*0x100000000)));
// update psi
int psi_atom_i = atomIndices1[restraintIndex].x;
int psi_atom_j = atomIndices1[restraintIndex].y;
int psi_atom_k = atomIndices1[restraintIndex].z;
int psi_atom_l = atomIndices1[restraintIndex].w;
// update forces
float3 psi_f_i = restForces[restraintIndex * 8 + 4];
float3 psi_f_j = restForces[restraintIndex * 8 + 5];
float3 psi_f_k = restForces[restraintIndex * 8 + 6];
float3 psi_f_l = restForces[restraintIndex * 8 + 7];
atomicAdd(&force[psi_atom_i], static_cast<unsigned long long>((long long) (psi_f_i.x*0x100000000)));
atomicAdd(&force[psi_atom_i + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_i.y*0x100000000)));
atomicAdd(&force[psi_atom_i + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_i.z*0x100000000)));
atomicAdd(&force[psi_atom_j], static_cast<unsigned long long>((long long) (psi_f_j.x*0x100000000)));
atomicAdd(&force[psi_atom_j + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_j.y*0x100000000)));
atomicAdd(&force[psi_atom_j + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_j.z*0x100000000)));
atomicAdd(&force[psi_atom_k], static_cast<unsigned long long>((long long) (psi_f_k.x*0x100000000)));
atomicAdd(&force[psi_atom_k + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_k.y*0x100000000)));
atomicAdd(&force[psi_atom_k + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_k.z*0x100000000)));
atomicAdd(&force[psi_atom_l], static_cast<unsigned long long>((long long) (psi_f_l.x*0x100000000)));
atomicAdd(&force[psi_atom_l + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_l.y*0x100000000)));
atomicAdd(&force[psi_atom_l + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_l.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
| 54d4e955b5003cea5df761a59e03ea82adc6fb86.cu | /*
Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
All rights reserved
*/
#include "assert.h"
#define ELEM_SWAP(a,b) { int t=(a);(a)=(b);(b)=t; }
__device__
float quick_select_float(const float* energy, int *index, int nelems, int select) {
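    // Hoare-style quickselect over the index array: partially reorders `index` so that
    // index[select] refers to the (select+1)-th smallest energy, then returns that energy.
    // The energy array itself is never modified.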
int low, high, middle, ll, hh;
low = 0;
high = nelems - 1;
for (;;) {
if (high <= low) { /* One element only */
return energy[index[select]];
}
if (high == low + 1) { /* Two elements only */
if (energy[index[low]] > energy[index[high]])
ELEM_SWAP(index[low], index[high]);
return energy[index[select]];
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (energy[index[middle]] > energy[index[high]]) ELEM_SWAP(index[middle], index[high]);
if (energy[index[low]] > energy[index[high]]) ELEM_SWAP(index[low], index[high]);
if (energy[index[middle]] > energy[index[low]]) ELEM_SWAP(index[middle], index[low]);
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(index[middle], index[low+1]);
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (energy[index[low]] > energy[index[ll]]);
do hh--; while (energy[index[hh]] > energy[index[low]]);
if (hh < ll)
break;
ELEM_SWAP(index[ll], index[hh]);
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(index[low], index[hh]);
/* Re-set active partition */
if (hh <= select)
low = ll;
if (hh >= select)
high = hh - 1;
}
}
#undef ELEM_SWAP
__device__ void computeTorsionAngle(const real4* __restrict__ posq, int atom_i, int atom_j, int atom_k, int atom_l,
float3& r_ij, float3& r_kj, float3& r_kl, float3& m, float3& n,
float& len_r_kj, float& len_m, float& len_n, float& phi) {
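    // Returns the dihedral angle i-j-k-l in degrees (range [-180, 180]) using the atan2
    // form, which stays well-conditioned near 0 and 180 degrees. The intermediate vectors
    // and lengths are passed back so the force routine can reuse them.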
// compute vectors
r_ij = trimTo3(posq[atom_j] - posq[atom_i]);
r_kj = trimTo3(posq[atom_j] - posq[atom_k]);
r_kl = trimTo3(posq[atom_l] - posq[atom_k]);
// compute normal vectors
m = cross(r_ij, r_kj);
n = cross(r_kj, r_kl);
// compute lengths
len_r_kj = sqrt(dot(r_kj, r_kj));
len_m = sqrt(dot(m, m));
len_n = sqrt(dot(n, n));
// compute angle phi
float x = dot(m / len_m, n / len_n);
float y = dot(cross(m / len_m, r_kj / len_r_kj), n / len_n);
phi = atan2(y, x) * 180. / 3.141592654;
}
__device__ void computeTorsionForce(const float dEdPhi, const float3& r_ij, const float3& r_kj, const float3& r_kl,
const float3& m, const float3& n, const float len_r_kj, const float len_m, const float len_n,
float3& F_i, float3& F_j, float3& F_k, float3& F_l) {
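    // Standard analytic dihedral forces: the 180/pi factor converts dE/dphi from
    // per-degree to per-radian, and F_j, F_k are constructed so the four forces sum to zero.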
F_i = -180. / 3.141592654 * dEdPhi * len_r_kj * m / (len_m * len_m);
F_l = 180. / 3.141592654 * dEdPhi * len_r_kj * n / (len_n * len_n);
F_j = -F_i + dot(r_ij, r_kj) / (len_r_kj * len_r_kj) * F_i - dot(r_kl, r_kj) / (len_r_kj * len_r_kj) * F_l;
F_k = -F_l - dot(r_ij, r_kj) / (len_r_kj * len_r_kj) * F_i + dot(r_kl, r_kj) / (len_r_kj * len_r_kj) * F_l;
}
extern "C" __global__ void computeDistRest(
const real4* __restrict__ posq, // positions and charges
const int2* __restrict__ atomIndices, // pair of atom indices
const float4* __restrict__ distanceBounds, // r1, r2, r3, r4
const float* __restrict__ forceConstants, // k
int* __restrict__ indexToGlobal, // array of indices into global arrays
float* __restrict__ energies, // global array of restraint energies
float3* __restrict__ forceBuffer, // temporary buffer to hold the force
const int numRestraints) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=blockDim.x*gridDim.x) {
// get my global index
const int globalIndex = indexToGlobal[index];
// get the distances
const float r1 = distanceBounds[index].x;
const float r2 = distanceBounds[index].y;
const float r3 = distanceBounds[index].z;
const float r4 = distanceBounds[index].w;
// get the force constant
const float k = forceConstants[index];
// get atom indices and compute distance
int atomIndexA = atomIndices[index].x;
int atomIndexB = atomIndices[index].y;
real4 delta = posq[atomIndexA] - posq[atomIndexB];
real distSquared = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real r = SQRT(distSquared);
// compute force and energy
float energy = 0.0;
float dEdR = 0.0;
float diff = 0.0;
float diff2 = 0.0;
float3 f;
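        // Flat-bottom restraint: zero in [r2, r3], harmonic (0.5*k*d^2) in [r1, r2] and
        // [r3, r4], and linear beyond r1 and r4 so energy and gradient remain continuous.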
if(r < r1) {
energy = k * (r - r1) * (r1 - r2) + 0.5 * k * (r1 - r2) * (r1 - r2);
dEdR = k * (r1 - r2);
}
else if(r < r2) {
diff = r - r2;
diff2 = diff * diff;
energy = 0.5 * k * diff2;
dEdR = k * diff;
}
else if(r < r3) {
dEdR = 0.0;
energy = 0.0;
}
else if(r < r4) {
diff = r - r3;
diff2 = diff * diff;
energy = 0.5 * k * diff2;
dEdR = k * diff;
}
else {
energy = k * (r - r4) * (r4 - r3) + 0.5 * k * (r4 - r3) * (r4 - r3);
dEdR = k * (r4 - r3);
}
assert(isfinite(energy));
// store force into local buffer
if (r > 0) {
f.x = delta.x * dEdR / r;
f.y = delta.y * dEdR / r;
f.z = delta.z * dEdR / r;
} else {
f.x = 0.0;
f.y = 0.0;
f.z = 0.0;
}
forceBuffer[index] = f;
// store energy into global buffer
energies[globalIndex] = energy;
}
}
extern "C" __global__ void computeHyperbolicDistRest(
const real4* __restrict__ posq, // positions and charges
const int2* __restrict__ atomIndices, // pair of atom indices
const float4* __restrict__ distanceBounds, // r1, r2, r3, r4
const float4* __restrict__ params, // k1, k2, a, b
int* __restrict__ indexToGlobal, // array of indices into global arrays
float* __restrict__ energies, // global array of restraint energies
float3* __restrict__ forceBuffer, // temporary buffer to hold the force
const int numRestraints) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=blockDim.x*gridDim.x) {
// get my global index
const int globalIndex = indexToGlobal[index];
// get the distances
const float r1 = distanceBounds[index].x;
const float r2 = distanceBounds[index].y;
const float r3 = distanceBounds[index].z;
const float r4 = distanceBounds[index].w;
// get the parameters
const float k1 = params[index].x;
const float k2 = params[index].y;
const float a = params[index].z;
const float b = params[index].w;
// get atom indices and compute distance
int atomIndexA = atomIndices[index].x;
int atomIndexB = atomIndices[index].y;
real4 delta = posq[atomIndexA] - posq[atomIndexB];
real distSquared = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real r = SQRT(distSquared);
// compute force and energy
float energy = 0.0;
float dEdR = 0.0;
float diff = 0.0;
float diff2 = 0.0;
float3 f;
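        // Same flat-bottom form as computeDistRest, except that beyond r4 the energy follows
        // a hyperbola that flattens toward 0.5*k2*a, so the force decays as 1/(r - r3)^2
        // instead of staying constant.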
if(r < r1) {
energy = k1 * (r - r1) * (r1 - r2) + 0.5 * k1 * (r1 - r2) * (r1 - r2);
dEdR = k1 * (r1 - r2);
}
else if(r < r2) {
diff = r - r2;
diff2 = diff * diff;
energy = 0.5 * k1 * diff2;
dEdR = k1 * diff;
}
else if(r < r3) {
dEdR = 0.0;
energy = 0.0;
}
else if(r < r4) {
diff = r - r3;
diff2 = diff * diff;
energy = 0.5 * k2 * diff2;
dEdR = k2 * diff;
}
else {
energy = 0.5 * k2 * (b / (r - r3) + a);
dEdR = -0.5 * b * k2 / (r - r3) / (r - r3);
}
// store force into local buffer
if (r > 0) {
f.x = delta.x * dEdR / r;
f.y = delta.y * dEdR / r;
f.z = delta.z * dEdR / r;
} else {
f.x = 0.0;
f.y = 0.0;
f.z = 0.0;
}
forceBuffer[index] = f;
assert(isfinite(energy));
// store energy into global buffer
energies[globalIndex] = energy;
}
}
extern "C" __global__ void computeTorsionRest(
const real4* __restrict__ posq, // positions and charges
const int4* __restrict__ atomIndices, // indices of atom_{i,j,k,l}
const float3* __restrict__ params, // phi, deltaPhi, forceConstant
int* __restrict__ indexToGlobal, // array of indices into global arrays
float* __restrict__ energies, // global array of restraint energies
float3* __restrict__ forceBuffer, // temporary buffer to hold the force
// forceBuffer[index*4] -> atom_i
// forceBuffer[index*4 + 3] -> atom_l
const int numRestraints) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=gridDim.x*blockDim.x) {
// get my global index
int globalIndex = indexToGlobal[index];
// get the atom indices
int4 indices = atomIndices[index];
int atom_i = indices.x;
int atom_j = indices.y;
int atom_k = indices.z;
int atom_l = indices.w;
// compute the angle and related quantities
float3 r_ij, r_kj, r_kl;
float3 m, n;
float len_r_kj;
float len_m;
float len_n;
float phi;
computeTorsionAngle(posq, atom_i, atom_j, atom_k, atom_l,
r_ij, r_kj, r_kl, m, n, len_r_kj, len_m, len_n, phi);
// compute E and dE/dphi
float phiEquil = params[index].x;
float phiDelta = params[index].y;
float forceConst = params[index].z;
float phiDiff = phi - phiEquil;
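        // wrap the periodic difference into [-180, 180] degrees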
if (phiDiff < -180.) {
phiDiff += 360.;
} else if (phiDiff > 180.) {
phiDiff -= 360.;
}
float energy = 0.0;
float dEdPhi = 0.0;
if (phiDiff < -phiDelta) {
energy = 0.5 * forceConst * (phiDiff + phiDelta) * (phiDiff + phiDelta);
dEdPhi = forceConst * (phiDiff + phiDelta);
}
else if(phiDiff > phiDelta) {
energy = 0.5 * forceConst * (phiDiff - phiDelta) * (phiDiff - phiDelta);
dEdPhi = forceConst * (phiDiff - phiDelta);
}
else{
energy = 0.0;
dEdPhi = 0.0;
}
assert(isfinite(energy));
energies[globalIndex] = energy;
computeTorsionForce(dEdPhi, r_ij, r_kj, r_kl, m, n, len_r_kj, len_m, len_n,
forceBuffer[4 * index + 0], forceBuffer[4 * index + 1],
forceBuffer[4 * index + 2], forceBuffer[4 * index + 3]);
}
}
extern "C" __global__ void computeDistProfileRest(
const real4* __restrict__ posq, // positions and charges
const int2* __restrict__ atomIndices, // pair of atom indices
const float2* __restrict__ distRanges, // upper and lower bounds of spline
const int* __restrict__ nBins, // number of bins
const float4* __restrict__ splineParams, // a0, a1, a2, a3
const int2* __restrict__ paramBounds, // upper and lower bounds for each spline
const float* __restrict__ scaleFactor, // scale factor for energies and forces
const int* __restrict__ indexToGlobal, // index of this restraint in the global array
float* __restrict__ restraintEnergies, // global energy of each restraint
float3* __restrict__ restraintForce, // cache the forces for application later
const int numRestraints ) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=blockDim.x*gridDim.x) {
// get my global index
int globalIndex = indexToGlobal[index];
// get atom indices and compute distance
int atomIndexA = atomIndices[index].x;
int atomIndexB = atomIndices[index].y;
real4 delta = posq[atomIndexA] - posq[atomIndexB];
real distSquared = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real r = SQRT(distSquared);
// compute bin
int bin = (int)( floor((r - distRanges[index].x) / (distRanges[index].y - distRanges[index].x) * nBins[index]) );
// compute the force and energy
float energy = 0.0;
float dEdR = 0.0;
float binWidth = (distRanges[index].y - distRanges[index].x) / nBins[index];
if (bin < 0){
energy = scaleFactor[index] * splineParams[paramBounds[index].x].x;
}
else if (bin >= nBins[index]) {
energy = scaleFactor[index] * (splineParams[paramBounds[index].y - 1].x +
splineParams[paramBounds[index].y - 1].y +
splineParams[paramBounds[index].y - 1].z +
splineParams[paramBounds[index].y - 1].w);
}
else {
float t = (r - bin * binWidth + distRanges[index].x) / binWidth;
float a0 = splineParams[ paramBounds[index].x + bin ].x;
float a1 = splineParams[ paramBounds[index].x + bin ].y;
float a2 = splineParams[ paramBounds[index].x + bin ].z;
float a3 = splineParams[ paramBounds[index].x + bin ].w;
energy = scaleFactor[index] * (a0 + a1 * t + a2 * t * t + a3 * t * t * t);
dEdR = scaleFactor[index] * (a1 + 2.0 * a2 * t + 3.0 * a3 * t * t) / binWidth;
}
assert(isfinite(energy));
// store force into local buffer
float3 f;
f.x = delta.x * dEdR / r;
f.y = delta.y * dEdR / r;
f.z = delta.z * dEdR / r;
restraintForce[index] = f;
// store energy into global buffer
restraintEnergies[globalIndex] = energy;
}
}
extern "C" __global__ void computeTorsProfileRest(
const real4* __restrict__ posq, // positions and charges
const int4* __restrict__ atomIndices0, // i,j,k,l for torsion 0
const int4* __restrict__ atomIndices1, // i,j,k,l for torsion 1
const int* __restrict__ nBins, // number of bins
const float4* __restrict__ params0, // a0 - a3
const float4* __restrict__ params1, // a4 - a7
const float4* __restrict__ params2, // a8 - a11
const float4* __restrict__ params3, // a12 - a15
const int2* __restrict__ paramBounds, // upper and lower bounds for each spline
const float* __restrict__ scaleFactor, // scale factor for energies and forces
const int* __restrict__ indexToGlobal, // index of this restraint in the global array
float* __restrict__ restraintEnergies, // global energy of each restraint
float3* __restrict__ forceBuffer, // cache the forces for application later
const int numRestraints ) {
for (int index=blockIdx.x*blockDim.x+threadIdx.x; index<numRestraints; index+=gridDim.x*blockDim.x) {
// get my global index
int globalIndex = indexToGlobal[index];
// compute phi
int phi_atom_i = atomIndices0[index].x;
int phi_atom_j = atomIndices0[index].y;
int phi_atom_k = atomIndices0[index].z;
int phi_atom_l = atomIndices0[index].w;
float3 phi_r_ij, phi_r_kj, phi_r_kl;
float3 phi_m, phi_n;
float phi_len_r_kj;
float phi_len_m;
float phi_len_n;
float phi;
computeTorsionAngle(posq, phi_atom_i, phi_atom_j, phi_atom_k, phi_atom_l,
phi_r_ij, phi_r_kj, phi_r_kl, phi_m, phi_n, phi_len_r_kj, phi_len_m, phi_len_n, phi);
// compute psi
int psi_atom_i = atomIndices1[index].x;
int psi_atom_j = atomIndices1[index].y;
int psi_atom_k = atomIndices1[index].z;
int psi_atom_l = atomIndices1[index].w;
float3 psi_r_ij, psi_r_kj, psi_r_kl;
float3 psi_m, psi_n;
float psi_len_r_kj;
float psi_len_m;
float psi_len_n;
float psi;
computeTorsionAngle(posq, psi_atom_i, psi_atom_j, psi_atom_k, psi_atom_l,
psi_r_ij, psi_r_kj, psi_r_kl, psi_m, psi_n, psi_len_r_kj, psi_len_m, psi_len_n, psi);
// compute bin indices
int i = (int)(floor((phi + 180.)/360. * nBins[index]));
int j = (int)(floor((psi + 180.)/360. * nBins[index]));
if (i >= nBins[index]) {
i = 0;
phi -= 360.;
}
if (i < 0) {
i = nBins[index] - 1;
phi += 360.;
}
if (j >= nBins[index]) {
j = 0;
psi -= 360.;
}
if (j < 0) {
j = nBins[index] - 1;
psi += 360.;
}
float delta = 360. / nBins[index];
float u = (phi - i * delta + 180.) / delta;
float v = (psi - j * delta + 180.) / delta;
int pi = paramBounds[index].x + i * nBins[index] + j;
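        // Bicubic patch: params0..params3 hold the 16 spline coefficients (params_i carries
        // the u^i terms, with .x/.y/.z/.w multiplying v^0..v^3); the energy and its u/v
        // partial derivatives are evaluated below.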
float energy = params0[pi].x + params0[pi].y * v + params0[pi].z * v*v + params0[pi].w * v*v*v +
params1[pi].x * u + params1[pi].y * u*v + params1[pi].z * u*v*v + params1[pi].w * u*v*v*v +
params2[pi].x * u*u + params2[pi].y * u*u*v + params2[pi].z * u*u*v*v + params2[pi].w * u*u*v*v*v +
params3[pi].x * u*u*u + params3[pi].y * u*u*u*v + params3[pi].z * u*u*u*v*v + params3[pi].w * u*u*u*v*v*v;
energy = energy * scaleFactor[index];
assert(isfinite(energy));
float dEdPhi = params1[pi].x + params1[pi].y * v + params1[pi].z * v*v + params1[pi].w * v*v*v +
params2[pi].x * 2*u + params2[pi].y * 2*u*v + params2[pi].z * 2*u*v*v + params2[pi].w * 2*u*v*v*v +
params3[pi].x * 3*u*u + params3[pi].y * 3*u*u*v + params3[pi].z * 3*u*u*v*v + params3[pi].w * 3*u*u*v*v*v;
dEdPhi = dEdPhi * scaleFactor[index] / delta;
float dEdPsi = params0[pi].y + params0[pi].z * 2*v + params0[pi].w * 3*v*v +
params1[pi].y * u + params1[pi].z * u*2*v + params1[pi].w * u*3*v*v +
params2[pi].y * u*u + params2[pi].z * u*u*2*v + params2[pi].w * u*u*3*v*v +
params3[pi].y * u*u*u + params3[pi].z * u*u*u*2*v + params3[pi].w * u*u*u*3*v*v;
dEdPsi = dEdPsi * scaleFactor[index] / delta;
restraintEnergies[globalIndex] = energy;
computeTorsionForce(dEdPhi, phi_r_ij, phi_r_kj, phi_r_kl, phi_m, phi_n, phi_len_r_kj, phi_len_m, phi_len_n,
forceBuffer[8 * index + 0], forceBuffer[8 * index + 1],
forceBuffer[8 * index + 2], forceBuffer[8 * index + 3]);
computeTorsionForce(dEdPsi, psi_r_ij, psi_r_kj, psi_r_kl, psi_m, psi_n, psi_len_r_kj, psi_len_m, psi_len_n,
forceBuffer[8 * index + 4], forceBuffer[8 * index + 5],
forceBuffer[8 * index + 6], forceBuffer[8 * index + 7]);
}
}
extern "C" __global__ void evaluateAndActivate(
const int numGroups,
const int* __restrict__ numActiveArray,
const int2* __restrict__ boundsArray,
const int* __restrict__ pristineIndexArray,
int* __restrict__ tempIndexArray,
const float* __restrict__ energyArray,
float* __restrict__ activeArray,
float* __restrict__ targetEnergyArray)
{
// This kernel computes which restraints are active within each group.
// It uses "warp-level" programming to do this, where each warp within
// a threadblock computes the results for a single group. All threads
    // within each group are implicitly synchronized at the hardware
    // level.
    // These are runtime parameters set by the C++ code.
const int groupsPerBlock = GROUPSPERBLOCK;
const int maxGroupSize = MAXGROUPSIZE;
// Because each warp is computing a separate group, we need to
// keep track of which group within the block we are acting on and
// our index within the warp.
const int groupOffsetInBlock = threadIdx.x / 32;
const int threadOffsetInWarp = threadIdx.x % 32;
// We store the energies and indices into scratch buffers. These scratch
// buffers are also used for reductions within each warp.
extern __shared__ volatile char scratch[];
volatile float* warpScratchEnergy = (float*)&scratch[groupOffsetInBlock*maxGroupSize*(sizeof(float)+sizeof(int))];
volatile int* warpScratchIndices = (int*)&scratch[groupOffsetInBlock*maxGroupSize*(sizeof(float)+sizeof(int)) +
maxGroupSize*sizeof(float)];
volatile float* warpReductionBuffer = (float*)&scratch[groupOffsetInBlock*32*sizeof(float)];
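// Per-warp scratch layout: maxGroupSize floats (energies) followed by
// maxGroupSize ints (indices). The 32-float warpReductionBuffer for each warp
// is packed at the start of the block's shared memory and can overlap the
// scratch region used in the first phase, hence the synchronization before
// the reduction phase below.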
// each warp loads the energies and indices for a group
for (int groupIndex=groupsPerBlock*blockIdx.x+groupOffsetInBlock; groupIndex<numGroups; groupIndex+=groupsPerBlock*gridDim.x) {
const int numActive = numActiveArray[groupIndex];
const int start = boundsArray[groupIndex].x;
const int end = boundsArray[groupIndex].y;
const int length = end - start;
const bool applyAll = (numActive == length);
// copy the energies to shared memory and setup indices
if (!applyAll) {
for(int i=threadOffsetInWarp; i<length; i+=32) {
const float energy = energyArray[pristineIndexArray[i + start]];
assert(isfinite(energy));
warpScratchIndices[i] = i;
warpScratchEnergy[i] = energy;
}
}
// now, we run the quick select algorithm.
// this is not parallelized, so we only run it on one thread
// per warp.
if (threadOffsetInWarp==0) {
float energyCut = 0.0;
if (!applyAll) {
energyCut = quick_select_float((const float*)warpScratchEnergy, (int *)warpScratchIndices, length, numActive-1);
}
else {
energyCut = 9.99e99;
}
warpScratchEnergy[0] = energyCut;
}
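// warpScratchEnergy[0] now holds the numActive-th smallest energy in the
// group (or a sentinel larger than any real energy when every restraint is
// active), so a restraint with energy <= energyCut is one of the numActive lowest.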
// now we're back on all threads again
float energyCut = warpScratchEnergy[0];
float thisActive = 0.0;
float thisEnergy = 0.0;
// we are going to start writing to warpReductionBuffer,
// which may overlap with the warpScratch* buffers, so
// we need to make sure that all threads are done first.
__syncthreads();
// reset the reduction buffers to zero
warpReductionBuffer[threadOffsetInWarp] = 0.0;
// sum up the energy for each restraint
for(int i=threadOffsetInWarp+start; i<end; i+=32) {
thisEnergy = energyArray[pristineIndexArray[i]];
thisActive = (float)(thisEnergy <= energyCut);
activeArray[pristineIndexArray[i]] = thisActive;
warpReductionBuffer[threadOffsetInWarp] += thisActive * thisEnergy;
}
// now we do a parallel reduction within each warp
int totalThreads = 32;
int index2 = 0;
while (totalThreads > 1) {
int halfPoint = (totalThreads >> 1);
if (threadOffsetInWarp < halfPoint) {
index2 = threadOffsetInWarp + halfPoint;
warpReductionBuffer[threadOffsetInWarp] += warpReductionBuffer[index2];
}
totalThreads = halfPoint;
}
// now store the energy for this group
if (threadOffsetInWarp == 0) {
targetEnergyArray[groupIndex] = warpReductionBuffer[0];
assert(isfinite(warpReductionBuffer[0]));
}
// make sure we're all done before we start again
__syncthreads();
}
}
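// Note on launch configuration (inferred from the indexing above, not taken
// from the host code): the kernel expects blockDim.x == GROUPSPERBLOCK * 32
// and a dynamic shared-memory allocation of at least
// GROUPSPERBLOCK * MAXGROUPSIZE * (sizeof(float) + sizeof(int)) bytes.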
__device__ void findMinMax(int length, const float* energyArray, float* minBuffer, float* maxBuffer) {
const int tid = threadIdx.x;
float energy;
float min = 9.9e99;
float max = -9.9e99;
// Each thread computes the min and max for its energies and stores them in the buffers
for (int i=tid; i<length; i+=blockDim.x) {
energy = energyArray[i];
if (energy < min) {
min = energy;
}
if (energy > max) {
max = energy;
}
}
minBuffer[tid] = min;
maxBuffer[tid] = max;
__syncthreads();
// Now we do a parallel reduction
int totalThreads = blockDim.x;
int index2 = 0;
float temp = 0;
while (totalThreads > 1) {
int halfPoint = (totalThreads >> 1);
if (tid < halfPoint) {
index2 = tid + halfPoint;
temp = minBuffer[index2];
if (temp < minBuffer[tid]) {
minBuffer[tid] = temp;
}
temp = maxBuffer[index2];
if (temp > maxBuffer[tid]) {
maxBuffer[tid] = temp;
}
}
__syncthreads();
totalThreads = halfPoint;
}
__syncthreads();
}
extern "C" __global__ void evaluateAndActivateCollections(
const int numCollections,
const int* __restrict__ numActiveArray,
const int2* __restrict__ boundsArray,
const int* __restrict__ indexArray,
const float* __restrict__ energyArray,
float* __restrict__ activeArray)
{
const float TOLERANCE = 1e-4;
const int maxCollectionSize = MAXCOLLECTIONSIZE;
const int tid = threadIdx.x;
const int warp = tid / 32;
const int lane = tid % 32; // which thread are we within this warp
// shared memory:
// energyBuffer: maxCollectionSize floats
// min/max Buffer: gridDim.x floats
// binCounts: gridDim.x ints
extern __shared__ char collectionScratch[];
float* energyBuffer = (float*)&collectionScratch[0];
float* minBuffer = (float*)&collectionScratch[maxCollectionSize*sizeof(float)];
float* maxBuffer = (float*)&collectionScratch[(maxCollectionSize+blockDim.x)*sizeof(float)];
int* binCounts = (int*)&collectionScratch[(maxCollectionSize+2*blockDim.x)*sizeof(float)];
int* bestBin = (int*)&(collectionScratch[(maxCollectionSize + 2 * blockDim.x) * sizeof(float) +
blockDim.x * sizeof(int)]);
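// Total dynamic shared memory implied by the pointers above:
// (maxCollectionSize + 2 * blockDim.x) * sizeof(float) + (blockDim.x + 1) * sizeof(int)
// (energyBuffer, min/max buffers, binCounts, plus one bestBin slot).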
for (int collIndex=blockIdx.x; collIndex<numCollections; collIndex+=gridDim.x) {
// we need to find the value of the cutoff energy below, then we will
// activate all groups with lower energy
float energyCutoff = 0.0;
int numActive = numActiveArray[collIndex];
int start = boundsArray[collIndex].x;
int end = boundsArray[collIndex].y;
int length = end - start;
// load the energy buffer for this collection
for (int i=tid; i<length; i+=blockDim.x) {
const float energy = energyArray[indexArray[start + i]];
assert(isfinite(energy));
energyBuffer[i] = energy;
}
__syncthreads();
findMinMax(length, energyBuffer, minBuffer, maxBuffer);
float min = minBuffer[0];
float max = maxBuffer[0];
float delta = max - min;
// All of the energies are the same, so they should all be active.
// Note: we need to handle this case separately, as otherwise delta
// would be zero and the binning below would divide by zero.
if (fabs(max-min) < TOLERANCE) {
energyCutoff = max;
} else {
// Here we need to find the k'th lowest energy. We do this using a recursive,
// binning and counting strategy. We divide the interval (min, max) into blockDim.x
// bins. We assign each energy to a bin, increment the count, and update
// the min and max. Then, we find the bin that contains the k'th lowest energy. If
// min==max for this bin, then we are done. Otherwise, we set the new (min, max) for
// the bins and recompute, assigning energies less than min to bin 0.
// loop until we break out at convergence
for (;;) {
/*if(tid==0) {*/
/*printf("%d\t%f\t%f\n", collIndex, min, max);*/
/*if (!isfinite(min) || !isfinite(max)) {*/
/*asm("trap;");*/
/*}*/
/*}*/
// zero out the buffers
binCounts[tid] = 0;
minBuffer[tid] = 9.0e99;
maxBuffer[tid] = 0.0;
__syncthreads();
// loop over all energies
for (int i=tid; i<length; i+=blockDim.x) {
float energy = energyBuffer[i];
// compute which bin this energy lies in
int index = float2int(floorf((blockDim.x-1) / delta * (energy - min)));
// we only count entries that lie within min and max
if ( (index >= 0) && (index < blockDim.x) ) {
// increment the counter using atomic function
atomicAdd(&binCounts[index], 1);
// update the min and max bounds for the bin using atomic functions
// note we need to cast to an integer, but floating point values
// still compare correctly when represented as integers
// this assumes that all energies are >0
atomicMin((unsigned int*)&minBuffer[index], __float_as_int(energy));
atomicMax((unsigned int*)&maxBuffer[index], __float_as_int(energy));
}
}
// make sure all threads are done
__syncthreads();
// now we need to do a cumulative sum, also known as an inclusive scan
// we will do this using a fast three-phase parallel algorithm
// this code assumes 1024 threads in 32 warps of 32 threads
// it will require modification to work with arbitrary sizes
// first, we do the cumulative sum within each warp
// this works because the threads are all implicitly synchronized
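// e.g. per-bin counts {3, 1, 4, 2, ...} become running totals {3, 4, 8, 10, ...}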
if (lane >= 1) binCounts[tid] += binCounts[tid - 1];
if (lane >= 2) binCounts[tid] += binCounts[tid - 2];
if (lane >= 4) binCounts[tid] += binCounts[tid - 4];
if (lane >= 8) binCounts[tid] += binCounts[tid - 8];
if (lane >= 16) binCounts[tid] += binCounts[tid - 16];
__syncthreads();
// now we use a single warp to do a cumulative sum over the last elements of each
// of the 32 warps
if (warp == 0) {
if (lane >= 1) binCounts[32 * tid + 31] += binCounts[32 * (tid - 1) + 31];
if (lane >= 2) binCounts[32 * tid + 31] += binCounts[32 * (tid - 2) + 31];
if (lane >= 4) binCounts[32 * tid + 31] += binCounts[32 * (tid - 4) + 31];
if (lane >= 8) binCounts[32 * tid + 31] += binCounts[32 * (tid - 8) + 31];
if (lane >= 16) binCounts[32 * tid + 31] += binCounts[32 * (tid - 16) + 31];
}
__syncthreads();
// now each warp adds the value of the 31st element of the previous warp
// there is nothing to add for warp0, so we skip it
// the last element of each warp already has this sum from the previous step,
// so we skip it
if (warp>0 && lane<31) {
binCounts[tid] += binCounts[32 * warp - 1];
}
__syncthreads();
// now we need to find the bin containing the k'th lowest value
// we use a single warp, where each thread looks at a block of 32 entries
// to find the smallest index where the cumulative sum is >= numActive
// we set flag if we find one
// this section uses implicit synchronization between threads in a single warp
if (warp == 0) {
int counter = 0;
int flag = false;
for (counter=0; counter<32; counter++) {
if (binCounts[32 * tid + counter] >= numActive) {
flag = true;
break;
}
}
// now find the smallest bin that meets the criteria
if (tid == 0) {
*bestBin = 1025;
}
// if we found a value >= numActive, then update the minimum value
if (flag) {
atomicMin(bestBin, 32 * tid + counter);
}
}
__syncthreads();
const float binMin = minBuffer[*bestBin];
const float binMax = maxBuffer[*bestBin];
// if all energies in this bin are the same, then we are done
if (fabs(binMin-binMax) < TOLERANCE) {
energyCutoff = binMax;
break;
}
// if this bin ends exactly on the k'th lowest energy, then we are done
if (binCounts[*bestBin] == numActive) {
energyCutoff = binMax;
break;
}
// otherwise, the correct value lies somewhere within this bin
// it will be between binMin and binMax and we need to find the
// binCounts[*bestBin] - numActive 'th element
// we loop through again searching with these updated parameters
min = binMin;
max = binMax;
delta = max - min;
numActive = binCounts[*bestBin] - numActive;
__syncthreads();
}
}
// now we know the energyCutoff, so apply it to each group
for (int i=tid; i<length; i+=blockDim.x) {
if (energyBuffer[i] <= energyCutoff) {
activeArray[indexArray[i + start]] = 1.0;
}
else {
activeArray[indexArray[i + start]] = 0.0;
}
}
__syncthreads();
}
}
extern "C" __global__ void applyGroups(
float* __restrict__ groupActive,
float* __restrict__ restraintActive,
const int2* __restrict__ bounds,
int numGroups) {
for (int groupIndex=blockIdx.x; groupIndex<numGroups; groupIndex+=gridDim.x) {
float active = groupActive[groupIndex];
for (int i=bounds[groupIndex].x + threadIdx.x; i<bounds[groupIndex].y; i+=blockDim.x) {
restraintActive[i] *= active;
}
}
}
extern "C" __global__ void applyDistRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int2* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numDistRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numDistRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int index1 = atomIndices[restraintIndex].x;
int index2 = atomIndices[restraintIndex].y;
energyAccum += globalEnergies[globalIndex];
float3 f = restForces[restraintIndex];
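// Forces are accumulated into OpenMM's 64-bit fixed-point force buffer: each
// component is scaled by 2^32 (0x100000000) and added atomically, with the
// x, y and z components stored PADDED_NUM_ATOMS entries apart.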
atomicAdd(&force[index1], static_cast<unsigned long long>((long long) (-f.x*0x100000000)));
atomicAdd(&force[index1 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.y*0x100000000)));
atomicAdd(&force[index1 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.z*0x100000000)));
atomicAdd(&force[index2], static_cast<unsigned long long>((long long) (f.x*0x100000000)));
atomicAdd(&force[index2 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.y*0x100000000)));
atomicAdd(&force[index2 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyHyperbolicDistRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int2* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numDistRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numDistRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int index1 = atomIndices[restraintIndex].x;
int index2 = atomIndices[restraintIndex].y;
energyAccum += globalEnergies[globalIndex];
float3 f = restForces[restraintIndex];
atomicAdd(&force[index1], static_cast<unsigned long long>((long long) (-f.x*0x100000000)));
atomicAdd(&force[index1 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.y*0x100000000)));
atomicAdd(&force[index1 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.z*0x100000000)));
atomicAdd(&force[index2], static_cast<unsigned long long>((long long) (f.x*0x100000000)));
atomicAdd(&force[index2 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.y*0x100000000)));
atomicAdd(&force[index2 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyTorsionRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int4* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int atom_i = atomIndices[restraintIndex].x;
int atom_j = atomIndices[restraintIndex].y;
int atom_k = atomIndices[restraintIndex].z;
int atom_l = atomIndices[restraintIndex].w;
energyAccum += globalEnergies[globalIndex];
// update forces
float3 f_i = restForces[restraintIndex * 4 + 0];
float3 f_j = restForces[restraintIndex * 4 + 1];
float3 f_k = restForces[restraintIndex * 4 + 2];
float3 f_l = restForces[restraintIndex * 4 + 3];
atomicAdd(&force[atom_i], static_cast<unsigned long long>((long long) (f_i.x*0x100000000)));
atomicAdd(&force[atom_i + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_i.y*0x100000000)));
atomicAdd(&force[atom_i + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_i.z*0x100000000)));
atomicAdd(&force[atom_j], static_cast<unsigned long long>((long long) (f_j.x*0x100000000)));
atomicAdd(&force[atom_j + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_j.y*0x100000000)));
atomicAdd(&force[atom_j + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_j.z*0x100000000)));
atomicAdd(&force[atom_k], static_cast<unsigned long long>((long long) (f_k.x*0x100000000)));
atomicAdd(&force[atom_k + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_k.y*0x100000000)));
atomicAdd(&force[atom_k + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_k.z*0x100000000)));
atomicAdd(&force[atom_l], static_cast<unsigned long long>((long long) (f_l.x*0x100000000)));
atomicAdd(&force[atom_l + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_l.y*0x100000000)));
atomicAdd(&force[atom_l + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f_l.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyDistProfileRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int2* __restrict__ atomIndices,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
int index1 = atomIndices[restraintIndex].x;
int index2 = atomIndices[restraintIndex].y;
energyAccum += globalEnergies[globalIndex];
float3 f = restForces[restraintIndex];
atomicAdd(&force[index1], static_cast<unsigned long long>((long long) (-f.x*0x100000000)));
atomicAdd(&force[index1 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.y*0x100000000)));
atomicAdd(&force[index1 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-f.z*0x100000000)));
atomicAdd(&force[index2], static_cast<unsigned long long>((long long) (f.x*0x100000000)));
atomicAdd(&force[index2 + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.y*0x100000000)));
atomicAdd(&force[index2 + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
extern "C" __global__ void applyTorsProfileRest(
unsigned long long * __restrict__ force,
real* __restrict__ energyBuffer,
const int4* __restrict__ atomIndices0,
const int4* __restrict__ atomIndices1,
const int* __restrict__ globalIndices,
const float3* __restrict__ restForces,
const float* __restrict__ globalEnergies,
const float* __restrict__ globalActive,
const int numRestraints) {
int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
float energyAccum = 0.0;
for (int restraintIndex=blockIdx.x*blockDim.x+threadIdx.x; restraintIndex<numRestraints; restraintIndex+=blockDim.x*gridDim.x) {
int globalIndex = globalIndices[restraintIndex];
if (globalActive[globalIndex]) {
// update energy
energyAccum += globalEnergies[globalIndex];
// update phi
int phi_atom_i = atomIndices0[restraintIndex].x;
int phi_atom_j = atomIndices0[restraintIndex].y;
int phi_atom_k = atomIndices0[restraintIndex].z;
int phi_atom_l = atomIndices0[restraintIndex].w;
// update forces
float3 phi_f_i = restForces[restraintIndex * 8 + 0];
float3 phi_f_j = restForces[restraintIndex * 8 + 1];
float3 phi_f_k = restForces[restraintIndex * 8 + 2];
float3 phi_f_l = restForces[restraintIndex * 8 + 3];
atomicAdd(&force[phi_atom_i], static_cast<unsigned long long>((long long) (phi_f_i.x*0x100000000)));
atomicAdd(&force[phi_atom_i + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_i.y*0x100000000)));
atomicAdd(&force[phi_atom_i + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_i.z*0x100000000)));
atomicAdd(&force[phi_atom_j], static_cast<unsigned long long>((long long) (phi_f_j.x*0x100000000)));
atomicAdd(&force[phi_atom_j + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_j.y*0x100000000)));
atomicAdd(&force[phi_atom_j + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_j.z*0x100000000)));
atomicAdd(&force[phi_atom_k], static_cast<unsigned long long>((long long) (phi_f_k.x*0x100000000)));
atomicAdd(&force[phi_atom_k + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_k.y*0x100000000)));
atomicAdd(&force[phi_atom_k + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_k.z*0x100000000)));
atomicAdd(&force[phi_atom_l], static_cast<unsigned long long>((long long) (phi_f_l.x*0x100000000)));
atomicAdd(&force[phi_atom_l + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_l.y*0x100000000)));
atomicAdd(&force[phi_atom_l + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (phi_f_l.z*0x100000000)));
// update psi
int psi_atom_i = atomIndices1[restraintIndex].x;
int psi_atom_j = atomIndices1[restraintIndex].y;
int psi_atom_k = atomIndices1[restraintIndex].z;
int psi_atom_l = atomIndices1[restraintIndex].w;
// update forces
float3 psi_f_i = restForces[restraintIndex * 8 + 4];
float3 psi_f_j = restForces[restraintIndex * 8 + 5];
float3 psi_f_k = restForces[restraintIndex * 8 + 6];
float3 psi_f_l = restForces[restraintIndex * 8 + 7];
atomicAdd(&force[psi_atom_i], static_cast<unsigned long long>((long long) (psi_f_i.x*0x100000000)));
atomicAdd(&force[psi_atom_i + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_i.y*0x100000000)));
atomicAdd(&force[psi_atom_i + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_i.z*0x100000000)));
atomicAdd(&force[psi_atom_j], static_cast<unsigned long long>((long long) (psi_f_j.x*0x100000000)));
atomicAdd(&force[psi_atom_j + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_j.y*0x100000000)));
atomicAdd(&force[psi_atom_j + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_j.z*0x100000000)));
atomicAdd(&force[psi_atom_k], static_cast<unsigned long long>((long long) (psi_f_k.x*0x100000000)));
atomicAdd(&force[psi_atom_k + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_k.y*0x100000000)));
atomicAdd(&force[psi_atom_k + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_k.z*0x100000000)));
atomicAdd(&force[psi_atom_l], static_cast<unsigned long long>((long long) (psi_f_l.x*0x100000000)));
atomicAdd(&force[psi_atom_l + PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_l.y*0x100000000)));
atomicAdd(&force[psi_atom_l + 2 * PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (psi_f_l.z*0x100000000)));
}
}
energyBuffer[threadIndex] += energyAccum;
}
|
84649599e465a0a7d292ffb43eaf50bfa6ad739b.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class EltwiseOp, class ActivationOp, std::size_t N>
__global__ void biasN_eltwise_op_generic_op_inplace_vec(Span<T> inplace_output, size_type inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params eltwise_params, const typename ActivationOp::Params act_params) {
using vector_type = get_vector_type_t<T, N>;
auto inplace_output_vPtr = vector_type::get_pointer(inplace_output.data());
auto eltwise_vPtr = vector_type::get_pointer(eltwise.data());
EltwiseOp eltwise_op(eltwise_params);
ActivationOp activation_op(act_params);
for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) {
const index_type bias_idx = (i / inner_size) % bias.size();
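// The launcher passes inner_size already divided by the vector width, so
// i / inner_size counts whole channels and % bias.size() wraps across the
// batch dimension, giving the bias/channel index for this vector.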
vector_type output_vec, eltwise_vec;
v_load(output_vec, inplace_output_vPtr[i]);
v_load(eltwise_vec, eltwise_vPtr[i]);
for(int j = 0; j < output_vec.size(); j++)
output_vec.data[j] = activation_op(eltwise_op(output_vec.data[j] + bias[bias_idx], eltwise_vec.data[j]));
v_store(inplace_output_vPtr[i], output_vec);
}
}
}
template <class T, class EltwiseOp, class ActivationOp, std::size_t N> static
void launch_vectorized_biasN_eltwise_op_generic_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params& eltwise_params, const typename ActivationOp::Params& act_params) {
CV_Assert(is_fully_aligned<T>(inplace_output, N));
CV_Assert(inplace_output.size() % bias.size() == 0);
CV_Assert(is_fully_aligned<T>(eltwise, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::biasN_eltwise_op_generic_op_inplace_vec<T, EltwiseOp, ActivationOp, N>;
auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream);
launch_kernel(kernel, policy, inplace_output, inner_size / N, bias, eltwise, eltwise_params, act_params);
}
template <class T, class EltwiseOp, class ActivationOp> static
void biasN_eltwise_op_generic_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params& eltwise_params = {}, const typename ActivationOp::Params& act_params = {}) {
CV_Assert(inplace_output.size() == eltwise.size());
if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4) && inner_size % 4 == 0) {
launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 4>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
} else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2) && inner_size % 2 == 0) {
launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 2>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
} else {
launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 1>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
}
}
template <class T>
void biasN_eltwise_sum_2_identity_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, IdentityFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_relu_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T slope) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, ReLUFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {slope});
}
template <class T>
void biasN_eltwise_sum_2_clipped_relu_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, ClippedReLUFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {floor, ceiling});
}
template <class T>
void biasN_eltwise_sum_2_tanh_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, TanHFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_swish_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, SwishFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_mish_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, MishFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_sigmoid_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, SigmoidFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_power_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T exp, T scale, T shift) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, PowerFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {exp, scale, shift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void biasN_eltwise_sum_2_identity_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_relu_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half);
template void biasN_eltwise_sum_2_clipped_relu_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half);
template void biasN_eltwise_sum_2_tanh_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_swish_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_mish_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_sigmoid_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_power_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half, __half);
#endif
template void biasN_eltwise_sum_2_identity_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_relu_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float);
template void biasN_eltwise_sum_2_clipped_relu_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float);
template void biasN_eltwise_sum_2_tanh_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_swish_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_mish_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_sigmoid_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_power_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float, float);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
| 84649599e465a0a7d292ffb43eaf50bfa6ad739b.cu | // This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class EltwiseOp, class ActivationOp, std::size_t N>
__global__ void biasN_eltwise_op_generic_op_inplace_vec(Span<T> inplace_output, size_type inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params eltwise_params, const typename ActivationOp::Params act_params) {
using vector_type = get_vector_type_t<T, N>;
auto inplace_output_vPtr = vector_type::get_pointer(inplace_output.data());
auto eltwise_vPtr = vector_type::get_pointer(eltwise.data());
EltwiseOp eltwise_op(eltwise_params);
ActivationOp activation_op(act_params);
for (auto i : grid_stride_range(inplace_output.size() / vector_type::size())) {
const index_type bias_idx = (i / inner_size) % bias.size();
vector_type output_vec, eltwise_vec;
v_load(output_vec, inplace_output_vPtr[i]);
v_load(eltwise_vec, eltwise_vPtr[i]);
for(int j = 0; j < output_vec.size(); j++)
output_vec.data[j] = activation_op(eltwise_op(output_vec.data[j] + bias[bias_idx], eltwise_vec.data[j]));
v_store(inplace_output_vPtr[i], output_vec);
}
}
}
template <class T, class EltwiseOp, class ActivationOp, std::size_t N> static
void launch_vectorized_biasN_eltwise_op_generic_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params& eltwise_params, const typename ActivationOp::Params& act_params) {
CV_Assert(is_fully_aligned<T>(inplace_output, N));
CV_Assert(inplace_output.size() % bias.size() == 0);
CV_Assert(is_fully_aligned<T>(eltwise, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::biasN_eltwise_op_generic_op_inplace_vec<T, EltwiseOp, ActivationOp, N>;
auto policy = make_policy(kernel, inplace_output.size() / N, 0, stream);
launch_kernel(kernel, policy, inplace_output, inner_size / N, bias, eltwise, eltwise_params, act_params);
}
template <class T, class EltwiseOp, class ActivationOp> static
void biasN_eltwise_op_generic_op_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, const typename EltwiseOp::Params& eltwise_params = {}, const typename ActivationOp::Params& act_params = {}) {
CV_Assert(inplace_output.size() == eltwise.size());
if (is_fully_aligned<T>(inplace_output, 4) && is_fully_aligned<T>(eltwise, 4) && inner_size % 4 == 0) {
launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 4>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
} else if (is_fully_aligned<T>(inplace_output, 2) && is_fully_aligned<T>(eltwise, 2) && inner_size % 2 == 0) {
launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 2>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
} else {
launch_vectorized_biasN_eltwise_op_generic_op_inplace<T, EltwiseOp, ActivationOp, 1>(stream, inplace_output, inner_size, bias, eltwise, eltwise_params, act_params);
}
}
template <class T>
void biasN_eltwise_sum_2_identity_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, IdentityFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_relu_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T slope) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, ReLUFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {slope});
}
template <class T>
void biasN_eltwise_sum_2_clipped_relu_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, ClippedReLUFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {floor, ceiling});
}
template <class T>
void biasN_eltwise_sum_2_tanh_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, TanHFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_swish_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, SwishFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_mish_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, MishFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_sigmoid_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, SigmoidFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise);
}
template <class T>
void biasN_eltwise_sum_2_power_inplace(const Stream& stream, Span<T> inplace_output, std::size_t inner_size, View<T> bias, View<T> eltwise, T exp, T scale, T shift) {
biasN_eltwise_op_generic_op_inplace<T, SumFunctor<T>, PowerFunctor<T>>(stream, inplace_output, inner_size, bias, eltwise, {}, {exp, scale, shift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void biasN_eltwise_sum_2_identity_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_relu_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half);
template void biasN_eltwise_sum_2_clipped_relu_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half);
template void biasN_eltwise_sum_2_tanh_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_swish_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_mish_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_sigmoid_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>);
template void biasN_eltwise_sum_2_power_inplace<__half>(const Stream&, Span<__half>, std::size_t, View<__half>, View<__half>, __half, __half, __half);
#endif
template void biasN_eltwise_sum_2_identity_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_relu_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float);
template void biasN_eltwise_sum_2_clipped_relu_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float);
template void biasN_eltwise_sum_2_tanh_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_swish_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_mish_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_sigmoid_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>);
template void biasN_eltwise_sum_2_power_inplace<float>(const Stream&, Span<float>, std::size_t, View<float>, View<float>, float, float, float);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
9c88dda1cb58003e2445bdaa575599e806c47f0f.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_mtgp32_host.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <math.h>
#include <rocrand/rocrand_mtgp32_11213.h>
#include "philox_random.h"
#include "philox_pytorch.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
using namespace std;
static uint64_t offset=0;
// float holdy1=pow(2.0,-10.0);
// float holdy2=pow(2.0,-24.0);
__device__ const float twoten=0.0009765625;
__device__ const float twominustwentyfour=0.000000059604644775390625;
template<typename T>
__device__ __forceinline__ T maybe_upcast(__half x){ return T(__half2float(x)); }
template<> __device__ __forceinline__ __half maybe_upcast<__half>(__half x){ return x; }
__device__ __forceinline__ float get_delta_fp16(float x){
int e_actual;
frexpf(x, &e_actual);
e_actual-=1;
// int e_actual=e_stored-127;
if(e_actual>=-14){
return twoten*pow(2,e_actual);
}
else{
return twominustwentyfour;
}
}
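// twoten = 2^-10 is the FP16 ulp at exponent 0 and twominustwentyfour = 2^-24
// is the spacing of FP16 subnormals, so get_delta_fp16 returns the gap between
// x and the next representable half-precision value. Adding uniform noise in
// [0, delta) and then rounding down (__float2half_rd) implements stochastic rounding.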
template <typename scalar_t>
__device__ __forceinline__ scalar_t natalia_magic(float x,hiprandStatePhilox4_32_10_t state){
float delta=get_delta_fp16(x);
float randy=hiprand_uniform(&state);
float val=x+randy*delta;
// To guarantee representability, route through a guaranteed FP16 cast.
return maybe_upcast<scalar_t>(__float2half_rd(val));
}
template <typename scalar_t>
__global__ void stochround(float* mtx,scalar_t* new_mtx, int n, uint64_t seed, uint64_t offset){
int threadnum=blockDim.x*blockIdx.x+threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed,threadnum,offset,&state);
for(int i = threadnum; i <n ; i +=blockDim.x*gridDim.x ){
float mtx_holder=static_cast<float>(mtx[i]);
new_mtx[i]=natalia_magic<scalar_t>(mtx_holder,state);
}
}
torch::Tensor stochroundfortensor(torch::Tensor mtx,torch::Tensor half_mtx){
torch::IntArrayRef sizes=mtx.sizes();
int dims=sizes.size();
size_t n = 1;
for(int county=0;county<dims;county++){
n=n*sizes[county];
}
uint64_t seed= 12345ul;
const int threads = 256.0;
// printf("%d \n \n \n \n ",offset);
float sm_max=72.0;
float numthreads_per_sm=1024.0;
const dim3 blocks(ceil(sm_max*numthreads_per_sm/threads),1,1);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(half_mtx.scalar_type(),"stochastic_tensor_round",([&] {hipLaunchKernelGGL((stochround<scalar_t>), dim3(blocks), dim3(threads), 0, 0, mtx.data<float>(),half_mtx.data<scalar_t>(),n,seed,offset);}));
offset = offset + (n + blocks.x*threads - 1)/(blocks.x*threads);
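// Advancing `offset` by the maximum number of uniforms any thread can draw in
// this launch keeps the Philox counter state distinct across successive calls.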
// printf("%d \n \n \n \n ",offset);
return half_mtx;
}
| 9c88dda1cb58003e2445bdaa575599e806c47f0f.cu | #include <torch/extension.h>
#include <curand_kernel.h>
#include <curand.h>
#include <curand_mtgp32_host.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <vector>
#include <math.h>
#include <curand_mtgp32dc_p_11213.h>
#include "philox_random.h"
#include "philox_pytorch.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
using namespace std;
static uint64_t offset=0;
// float holdy1=pow(2.0,-10.0);
// float holdy2=pow(2.0,-24.0);
__device__ const float twoten=0.0009765625;
__device__ const float twominustwentyfour=0.000000059604644775390625;
template<typename T>
__device__ __forceinline__ T maybe_upcast(__half x){ return T(__half2float(x)); }
template<> __device__ __forceinline__ __half maybe_upcast<__half>(__half x){ return x; }
__device__ __forceinline__ float get_delta_fp16(float x){
int e_actual;
frexpf(x, &e_actual);
e_actual-=1;
// int e_actual=e_stored-127;
if(e_actual>=-14){
return twoten*pow(2,e_actual);
}
else{
return twominustwentyfour;
}
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t natalia_magic(float x,curandStatePhilox4_32_10_t state){
float delta=get_delta_fp16(x);
float randy=curand_uniform(&state);
float val=x+randy*delta;
// To guarantee representability, route through a guaranteed FP16 cast.
return maybe_upcast<scalar_t>(__float2half_rd(val));
}
template <typename scalar_t>
__global__ void stochround(float* mtx,scalar_t* new_mtx, int n, uint64_t seed, uint64_t offset){
int threadnum=blockDim.x*blockIdx.x+threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed,threadnum,offset,&state);
for(int i = threadnum; i <n ; i +=blockDim.x*gridDim.x ){
float mtx_holder=static_cast<float>(mtx[i]);
new_mtx[i]=natalia_magic<scalar_t>(mtx_holder,state);
}
}
torch::Tensor stochroundfortensor(torch::Tensor mtx,torch::Tensor half_mtx){
torch::IntArrayRef sizes=mtx.sizes();
int dims=sizes.size();
size_t n = 1;
for(int county=0;county<dims;county++){
n=n*sizes[county];
}
uint64_t seed= 12345ul;
const int threads = 256.0;
// printf("%d \n \n \n \n ",offset);
float sm_max=72.0;
float numthreads_per_sm=1024.0;
const dim3 blocks(ceil(sm_max*numthreads_per_sm/threads),1,1);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(half_mtx.scalar_type(),"stochastic_tensor_round",([&] {stochround<scalar_t><<<blocks, threads>>>(mtx.data<float>(),half_mtx.data<scalar_t>(),n,seed,offset);}));
offset = offset + (n + blocks.x*threads - 1)/(blocks.x*threads);
// printf("%d \n \n \n \n ",offset);
return half_mtx;
}
|
ecf5264997cea427268c9b4ddcb6970609075e23.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2017-2018 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <hip/hip_runtime.h>
#include "NvCodecUtils.h"
template<typename YuvUnitx2>
static __global__ void Resize(hipTextureObject_t texY, hipTextureObject_t texUv,
uint8_t *pDst, uint8_t *pDstUV, int nPitch, int nWidth, int nHeight,
float fxScale, float fyScale)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x,
iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= nWidth / 2 || iy >= nHeight / 2) {
return;
}
int x = ix * 2, y = iy * 2;
typedef decltype(YuvUnitx2::x) YuvUnit;
const int MAX = 1 << (sizeof(YuvUnit) * 8);
*(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 {
(YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX),
(YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX)
};
y++;
*(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 {
(YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX),
(YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX)
};
//float2 uv = tex2D<float2>(texUv, ix / fxScale, (nHeight + iy) / fyScale + 0.5f);
//*(YuvUnitx2 *)(pDstUV + iy * nPitch + ix * 2 * sizeof(YuvUnit)) = YuvUnitx2{ (YuvUnit)(uv.x * MAX), (YuvUnit)(uv.y * MAX) };
}
template <typename YuvUnitx2>
static void Resize(unsigned char *dpDst, unsigned char* dpDstUV, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight) {
hipResourceDesc resDesc = {};
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = dpSrc;
resDesc.res.pitch2D.desc = hipCreateChannelDesc<decltype(YuvUnitx2::x)>();
resDesc.res.pitch2D.width = nSrcWidth;
resDesc.res.pitch2D.height = nSrcHeight;
resDesc.res.pitch2D.pitchInBytes = nSrcPitch;
hipTextureDesc texDesc = {};
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeNormalizedFloat;
hipTextureObject_t texY=0;
ck(hipCreateTextureObject(&texY, &resDesc, &texDesc, NULL));
resDesc.res.pitch2D.desc = hipCreateChannelDesc<YuvUnitx2>();
resDesc.res.pitch2D.width = nSrcWidth / 2;
resDesc.res.pitch2D.height = nSrcHeight * 3 / 2;
hipTextureObject_t texUv=0;
ck(hipCreateTextureObject(&texUv, &resDesc, &texDesc, NULL));
Resize<YuvUnitx2> << <dim3((nDstWidth + 31) / 32, (nDstHeight + 31) / 32), dim3(16, 16) >> >(texY, texUv, dpDst, dpDstUV,
nDstPitch, nDstWidth, nDstHeight, 1.0f * nDstWidth / nSrcWidth, 1.0f * nDstHeight / nSrcHeight);
ck(hipDestroyTextureObject(texY));
ck(hipDestroyTextureObject(texUv));
}
void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstNv12UV)
{
unsigned char* dpDstUV = dpDstNv12UV ? dpDstNv12UV : dpDstNv12 + (nDstPitch*nDstHeight);
return Resize<uchar2>(dpDstNv12, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcNv12, nSrcPitch, nSrcWidth, nSrcHeight);
}
void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstP016UV)
{
unsigned char* dpDstUV = dpDstP016UV ? dpDstP016UV : dpDstP016 + (nDstPitch*nDstHeight);
return Resize<ushort2>(dpDstP016, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcP016, nSrcPitch, nSrcWidth, nSrcHeight);
}
static __global__ void Scale(hipTextureObject_t texSrc,
uint8_t *pDst, int nPitch, int nWidth, int nHeight,
float fxScale, float fyScale)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
*(unsigned char*)(pDst + (y * nPitch) + x) = (unsigned char)(fminf((tex2D<float>(texSrc, x * fxScale, y * fyScale)) * 255.0f, 255.0f));
}
static __global__ void Scale_uv(hipTextureObject_t texSrc,
uint8_t *pDst, int nPitch, int nWidth, int nHeight,
float fxScale, float fyScale)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
float2 uv = tex2D<float2>(texSrc, x * fxScale, y * fyScale);
uchar2 uvOut = uchar2{ (unsigned char)(fminf(uv.x * 255.0f, 255.0f)), (unsigned char)(fminf(uv.y * 255.0f, 255.0f)) };
*(uchar2*)(pDst + (y * nPitch) + 2 * x) = uvOut;
}
void ScaleKernelLaunch(unsigned char *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, bool bUVPlane = false)
{
hipResourceDesc resDesc = {};
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = dpSrc;
resDesc.res.pitch2D.desc = bUVPlane ? hipCreateChannelDesc<uchar2>() : hipCreateChannelDesc<unsigned char>();
resDesc.res.pitch2D.width = nSrcWidth;
resDesc.res.pitch2D.height = nSrcHeight;
resDesc.res.pitch2D.pitchInBytes = nSrcPitch;
hipTextureDesc texDesc = {};
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeNormalizedFloat;
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.addressMode[2] = hipAddressModeClamp;
hipTextureObject_t texSrc = 0;
ck(hipCreateTextureObject(&texSrc, &resDesc, &texDesc, NULL));
dim3 blockSize(16, 16, 1);
dim3 gridSize(((uint32_t)nDstWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nDstHeight + blockSize.y - 1) / blockSize.y, 1);
if (bUVPlane)
{
Scale_uv << <gridSize, blockSize >> >(texSrc, dpDst,
nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight);
}
else
{
Scale << <gridSize, blockSize >> >(texSrc, dpDst,
nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight);
}
ck(hipGetLastError());
ck(hipDestroyTextureObject(texSrc));
}
void ScaleYUV420(unsigned char *dpDstY,
unsigned char* dpDstU,
unsigned char* dpDstV,
int nDstPitch,
int nDstChromaPitch,
int nDstWidth,
int nDstHeight,
unsigned char *dpSrcY,
unsigned char* dpSrcU,
unsigned char* dpSrcV,
int nSrcPitch,
int nSrcChromaPitch,
int nSrcWidth,
int nSrcHeight,
bool bSemiplanar)
{
int chromaWidthDst = (nDstWidth + 1) / 2;
int chromaHeightDst = (nDstHeight + 1) / 2;
int chromaWidthSrc = (nSrcWidth + 1) / 2;
int chromaHeightSrc = (nSrcHeight + 1) / 2;
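// When bSemiplanar is set the chroma is a single interleaved UV plane (NV12
// layout): only the U pointer is used and Scale_uv reads/writes uchar2 pairs.
// Otherwise the planar U and V planes are scaled independently.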
ScaleKernelLaunch(dpDstY, nDstPitch, nDstWidth, nDstHeight, dpSrcY, nSrcPitch, nSrcWidth, nSrcHeight);
if (bSemiplanar)
{
ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc, true);
}
else
{
ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc);
ScaleKernelLaunch(dpDstV, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcV, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc);
}
}
| ecf5264997cea427268c9b4ddcb6970609075e23.cu | /*
* Copyright 2017-2018 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cuda_runtime.h>
#include "NvCodecUtils.h"
template<typename YuvUnitx2>
static __global__ void Resize(cudaTextureObject_t texY, cudaTextureObject_t texUv,
uint8_t *pDst, uint8_t *pDstUV, int nPitch, int nWidth, int nHeight,
float fxScale, float fyScale)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x,
iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= nWidth / 2 || iy >= nHeight / 2) {
return;
}
int x = ix * 2, y = iy * 2;
typedef decltype(YuvUnitx2::x) YuvUnit;
const int MAX = 1 << (sizeof(YuvUnit) * 8);
*(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 {
(YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX),
(YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX)
};
y++;
*(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 {
(YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX),
(YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX)
};
//float2 uv = tex2D<float2>(texUv, ix / fxScale, (nHeight + iy) / fyScale + 0.5f);
//*(YuvUnitx2 *)(pDstUV + iy * nPitch + ix * 2 * sizeof(YuvUnit)) = YuvUnitx2{ (YuvUnit)(uv.x * MAX), (YuvUnit)(uv.y * MAX) };
}
template <typename YuvUnitx2>
static void Resize(unsigned char *dpDst, unsigned char* dpDstUV, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight) {
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = dpSrc;
resDesc.res.pitch2D.desc = cudaCreateChannelDesc<decltype(YuvUnitx2::x)>();
resDesc.res.pitch2D.width = nSrcWidth;
resDesc.res.pitch2D.height = nSrcHeight;
resDesc.res.pitch2D.pitchInBytes = nSrcPitch;
cudaTextureDesc texDesc = {};
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeNormalizedFloat;
cudaTextureObject_t texY=0;
ck(cudaCreateTextureObject(&texY, &resDesc, &texDesc, NULL));
resDesc.res.pitch2D.desc = cudaCreateChannelDesc<YuvUnitx2>();
resDesc.res.pitch2D.width = nSrcWidth / 2;
resDesc.res.pitch2D.height = nSrcHeight * 3 / 2;
cudaTextureObject_t texUv=0;
ck(cudaCreateTextureObject(&texUv, &resDesc, &texDesc, NULL));
Resize<YuvUnitx2> << <dim3((nDstWidth + 31) / 32, (nDstHeight + 31) / 32), dim3(16, 16) >> >(texY, texUv, dpDst, dpDstUV,
nDstPitch, nDstWidth, nDstHeight, 1.0f * nDstWidth / nSrcWidth, 1.0f * nDstHeight / nSrcHeight);
ck(cudaDestroyTextureObject(texY));
ck(cudaDestroyTextureObject(texUv));
}
void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstNv12UV)
{
unsigned char* dpDstUV = dpDstNv12UV ? dpDstNv12UV : dpDstNv12 + (nDstPitch*nDstHeight);
return Resize<uchar2>(dpDstNv12, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcNv12, nSrcPitch, nSrcWidth, nSrcHeight);
}
void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstP016UV)
{
unsigned char* dpDstUV = dpDstP016UV ? dpDstP016UV : dpDstP016 + (nDstPitch*nDstHeight);
return Resize<ushort2>(dpDstP016, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcP016, nSrcPitch, nSrcWidth, nSrcHeight);
}
static __global__ void Scale(cudaTextureObject_t texSrc,
uint8_t *pDst, int nPitch, int nWidth, int nHeight,
float fxScale, float fyScale)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
*(unsigned char*)(pDst + (y * nPitch) + x) = (unsigned char)(fminf((tex2D<float>(texSrc, x * fxScale, y * fyScale)) * 255.0f, 255.0f));
}
static __global__ void Scale_uv(cudaTextureObject_t texSrc,
uint8_t *pDst, int nPitch, int nWidth, int nHeight,
float fxScale, float fyScale)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
float2 uv = tex2D<float2>(texSrc, x * fxScale, y * fyScale);
uchar2 uvOut = uchar2{ (unsigned char)(fminf(uv.x * 255.0f, 255.0f)), (unsigned char)(fminf(uv.y * 255.0f, 255.0f)) };
*(uchar2*)(pDst + (y * nPitch) + 2 * x) = uvOut;
}
void ScaleKernelLaunch(unsigned char *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, bool bUVPlane = false)
{
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = dpSrc;
resDesc.res.pitch2D.desc = bUVPlane ? cudaCreateChannelDesc<uchar2>() : cudaCreateChannelDesc<unsigned char>();
resDesc.res.pitch2D.width = nSrcWidth;
resDesc.res.pitch2D.height = nSrcHeight;
resDesc.res.pitch2D.pitchInBytes = nSrcPitch;
cudaTextureDesc texDesc = {};
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeNormalizedFloat;
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
cudaTextureObject_t texSrc = 0;
ck(cudaCreateTextureObject(&texSrc, &resDesc, &texDesc, NULL));
dim3 blockSize(16, 16, 1);
dim3 gridSize(((uint32_t)nDstWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nDstHeight + blockSize.y - 1) / blockSize.y, 1);
if (bUVPlane)
{
Scale_uv << <gridSize, blockSize >> >(texSrc, dpDst,
nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight);
}
else
{
Scale << <gridSize, blockSize >> >(texSrc, dpDst,
nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight);
}
ck(cudaGetLastError());
ck(cudaDestroyTextureObject(texSrc));
}
void ScaleYUV420(unsigned char *dpDstY,
unsigned char* dpDstU,
unsigned char* dpDstV,
int nDstPitch,
int nDstChromaPitch,
int nDstWidth,
int nDstHeight,
unsigned char *dpSrcY,
unsigned char* dpSrcU,
unsigned char* dpSrcV,
int nSrcPitch,
int nSrcChromaPitch,
int nSrcWidth,
int nSrcHeight,
bool bSemiplanar)
{
int chromaWidthDst = (nDstWidth + 1) / 2;
int chromaHeightDst = (nDstHeight + 1) / 2;
int chromaWidthSrc = (nSrcWidth + 1) / 2;
int chromaHeightSrc = (nSrcHeight + 1) / 2;
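    // scale the full-resolution luma plane first, then the half-resolution chroma plane(s)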
ScaleKernelLaunch(dpDstY, nDstPitch, nDstWidth, nDstHeight, dpSrcY, nSrcPitch, nSrcWidth, nSrcHeight);
if (bSemiplanar)
{
ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc, true);
}
else
{
ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc);
ScaleKernelLaunch(dpDstV, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcV, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc);
}
}
|
0eff00a1cad4c210acb272318996e692f5d636e6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <helper_cuda.h>
#include <rocblas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
cout << "Before new" << endl;
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
cout << "Before start" << endl;
model->start();
cout << "After start" << endl;
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
| 0eff00a1cad4c210acb272318996e692f5d636e6.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <helper_cuda.h>
#include <cublas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
cout << "Before new" << endl;
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
cout << "Before start" << endl;
model->start();
cout << "After start" << endl;
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
|
f7fcaac71a24d79941019e52964bd6055e41e467.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "lap.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( lap), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,nx,ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( lap), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( lap), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f7fcaac71a24d79941019e52964bd6055e41e467.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "lap.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
lap<<<gridBlock,threadBlock>>>(a,b,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
lap<<<gridBlock,threadBlock>>>(a,b,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
lap<<<gridBlock,threadBlock>>>(a,b,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
50d4f731ea7b1441f2a9276981b676ebfeb20d67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/relu_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
#ifdef __HIPCC__
typedef __half2 half2;
#endif
template <typename T>
__global__ void ReluCUDAKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) > 0 ? __ldg(X + i) : T(0);
#else
Y[i] = X[i] > 0 ? X[i] : T(0);
#endif
}
}
__global__ void ReluHalfCUDAKernel(const int N, const half* X, half* Y) {
const half kZero = __float2half(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hgt(__ldg(X + i), kZero) ? __ldg(X + i) : kZero;
#else
Y[i] = (__half2float(X[i]) > 0) ? X[i] : kZero;
#endif
}
}
__global__ void ReluHalf2CUDAKernel(const int N, const half2* X, half2* Y) {
const half2 kZero = __float2half2_rn(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hmul2(__hgt2(__ldg(X + i), kZero), __ldg(X + i));
#else
const float2 xx = __half22float2(X[i]);
Y[i] = __floats2half2_rn(xx.x > 0 ? xx.x : 0.f, xx.y > 0 ? xx.y : 0.f);
#endif
}
}
template <typename T>
__global__ void
ReluGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(Y + i) > 0 ? __ldg(dY + i) : 0;
#else
dX[i] = Y[i] > 0 ? dY[i] : 0;
#endif
}
}
__global__ void ReluGradientHalfCUDAKernel(
const int N,
const half* dY,
const half* Y,
half* dX) {
const half kZero = __float2half(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
dX[i] = __hgt(__ldg(Y + i), kZero) ? __ldg(dY + i) : kZero;
#else
dX[i] = (__half2float(Y[i]) > 0) ? dY[i] : kZero;
#endif
}
}
__global__ void ReluGradientHalf2CUDAKernel(
const int N,
const half2* dY,
const half2* Y,
half2* dX) {
const half2 kZero = __float2half2_rn(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
dX[i] = __hmul2(__hgt2(__ldg(Y + i), kZero), __ldg(dY + i));
#else
const float2 dy = __half22float2(dY[i]);
const float2 yy = __half22float2(Y[i]);
dX[i] = __floats2half2_rn(yy.x > 0 ? dy.x : 0.f, yy.y > 0 ? dy.y : 0.f);
#endif
}
}
} // namespace
template <>
template <typename T>
bool ReluFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
hipLaunchKernelGGL(( ReluCUDAKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, X, Y);
return true;
}
template <>
template <>
bool ReluFunctor<CUDAContext>::operator()<at::Half>(
const int N,
const at::Half* X,
at::Half* Y,
CUDAContext* context) const {
if ((N & 1) == 0) {
hipLaunchKernelGGL(( ReluHalf2CUDAKernel),
dim3(CAFFE_GET_BLOCKS((N >> 1))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
(N >> 1),
reinterpret_cast<const half2*>(X),
reinterpret_cast<half2*>(Y));
} else {
hipLaunchKernelGGL(( ReluHalfCUDAKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N, reinterpret_cast<const half*>(X), reinterpret_cast<half*>(Y));
}
return true;
}
template <>
template <typename T>
bool ReluGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( ReluGradientCUDAKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, dY, Y, dX);
return true;
}
template <>
template <>
bool ReluGradientFunctor<CUDAContext>::Forward<at::Half>(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const at::Half* Y,
const at::Half* dY,
at::Half* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
if ((size & 1) == 0) {
hipLaunchKernelGGL(( ReluGradientHalf2CUDAKernel),
dim3(CAFFE_GET_BLOCKS((size >> 1))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
(size >> 1),
reinterpret_cast<const half2*>(dY),
reinterpret_cast<const half2*>(Y),
reinterpret_cast<half2*>(dX));
} else {
hipLaunchKernelGGL(( ReluGradientHalfCUDAKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
size,
reinterpret_cast<const half*>(dY),
reinterpret_cast<const half*>(Y),
reinterpret_cast<half*>(dX));
}
return true;
}
REGISTER_CUDA_OPERATOR(
Relu,
UnaryElementwiseOp<
TensorTypes<float, at::Half>,
CUDAContext,
ReluFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ReluGradient,
BinaryElementwiseOp<
TensorTypes<float, at::Half>,
CUDAContext,
ReluGradientFunctor<CUDAContext>>);
} // namespace caffe2
| 50d4f731ea7b1441f2a9276981b676ebfeb20d67.cu | #include "caffe2/operators/relu_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
#ifdef __HIPCC__
typedef __half2 half2;
#endif
template <typename T>
__global__ void ReluCUDAKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) > 0 ? __ldg(X + i) : T(0);
#else
Y[i] = X[i] > 0 ? X[i] : T(0);
#endif
}
}
__global__ void ReluHalfCUDAKernel(const int N, const half* X, half* Y) {
const half kZero = __float2half(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hgt(__ldg(X + i), kZero) ? __ldg(X + i) : kZero;
#else
Y[i] = (__half2float(X[i]) > 0) ? X[i] : kZero;
#endif
}
}
__global__ void ReluHalf2CUDAKernel(const int N, const half2* X, half2* Y) {
const half2 kZero = __float2half2_rn(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hmul2(__hgt2(__ldg(X + i), kZero), __ldg(X + i));
#else
const float2 xx = __half22float2(X[i]);
Y[i] = __floats2half2_rn(xx.x > 0 ? xx.x : 0.f, xx.y > 0 ? xx.y : 0.f);
#endif
}
}
template <typename T>
__global__ void
ReluGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(Y + i) > 0 ? __ldg(dY + i) : 0;
#else
dX[i] = Y[i] > 0 ? dY[i] : 0;
#endif
}
}
__global__ void ReluGradientHalfCUDAKernel(
const int N,
const half* dY,
const half* Y,
half* dX) {
const half kZero = __float2half(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
dX[i] = __hgt(__ldg(Y + i), kZero) ? __ldg(dY + i) : kZero;
#else
dX[i] = (__half2float(Y[i]) > 0) ? dY[i] : kZero;
#endif
}
}
__global__ void ReluGradientHalf2CUDAKernel(
const int N,
const half2* dY,
const half2* Y,
half2* dX) {
const half2 kZero = __float2half2_rn(0.0f);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
dX[i] = __hmul2(__hgt2(__ldg(Y + i), kZero), __ldg(dY + i));
#else
const float2 dy = __half22float2(dY[i]);
const float2 yy = __half22float2(Y[i]);
dX[i] = __floats2half2_rn(yy.x > 0 ? dy.x : 0.f, yy.y > 0 ? dy.y : 0.f);
#endif
}
}
} // namespace
template <>
template <typename T>
bool ReluFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
ReluCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, Y);
return true;
}
template <>
template <>
bool ReluFunctor<CUDAContext>::operator()<at::Half>(
const int N,
const at::Half* X,
at::Half* Y,
CUDAContext* context) const {
if ((N & 1) == 0) {
ReluHalf2CUDAKernel<<<
CAFFE_GET_BLOCKS((N >> 1)),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
(N >> 1),
reinterpret_cast<const half2*>(X),
reinterpret_cast<half2*>(Y));
} else {
ReluHalfCUDAKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N, reinterpret_cast<const half*>(X), reinterpret_cast<half*>(Y));
}
return true;
}
template <>
template <typename T>
bool ReluGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
ReluGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
return true;
}
template <>
template <>
bool ReluGradientFunctor<CUDAContext>::Forward<at::Half>(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const at::Half* Y,
const at::Half* dY,
at::Half* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
if ((size & 1) == 0) {
ReluGradientHalf2CUDAKernel<<<
CAFFE_GET_BLOCKS((size >> 1)),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
(size >> 1),
reinterpret_cast<const half2*>(dY),
reinterpret_cast<const half2*>(Y),
reinterpret_cast<half2*>(dX));
} else {
ReluGradientHalfCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
size,
reinterpret_cast<const half*>(dY),
reinterpret_cast<const half*>(Y),
reinterpret_cast<half*>(dX));
}
return true;
}
REGISTER_CUDA_OPERATOR(
Relu,
UnaryElementwiseOp<
TensorTypes<float, at::Half>,
CUDAContext,
ReluFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ReluGradient,
BinaryElementwiseOp<
TensorTypes<float, at::Half>,
CUDAContext,
ReluGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
c1f2810f484e892aaa7beae8738785f38db73683.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by Mike on 6/16/2021.
//
#include <glm/gtx/component_wise.hpp>
#include <gpu/cuda_octree.h>
[[nodiscard]] SV_XPU inline auto _intersect_box(Ray ray, glm::vec3 bbox_min, float bbox_r) noexcept {
auto bbox_max = bbox_min + bbox_r;
auto t_min = (bbox_min - ray.o) / ray.d;
auto t_max = (bbox_max - ray.o) / ray.d;
auto o_mat = glm::mat3{ray.o, ray.o, ray.o};
glm::vec3 p_min[]{ray.o + t_min.x * ray.d, ray.o + t_min.y * ray.d, ray.o + t_min.z * ray.d};
glm::vec3 p_max[]{ray.o + t_max.x * ray.d, ray.o + t_max.y * ray.d, ray.o + t_max.z * ray.d};
auto valid = [ray, bbox_min, bbox_max](auto t, auto p) noexcept {
return glm::not_(glm::isnan(t))
&& glm::greaterThanEqual(t, glm::vec3{ray.t_min})
&& glm::lessThanEqual(t, glm::vec3{ray.t_max})
&& glm::bvec3{p[0].y >= bbox_min.y && p[0].y <= bbox_max.y
&& p[0].z >= bbox_min.z && p[0].z <= bbox_max.z,
p[1].z >= bbox_min.z && p[1].z <= bbox_max.z
&& p[1].x >= bbox_min.x && p[1].x <= bbox_max.x,
p[2].x >= bbox_min.x && p[2].x <= bbox_max.x
&& p[2].y >= bbox_min.y && p[2].y <= bbox_max.y};
};
auto t_invalid = glm::vec3{std::numeric_limits<float>::max()};
auto valid_min = valid(t_min, p_min);
auto valid_max = valid(t_max, p_max);
t_min = glm::mix(t_invalid, t_min, valid_min);
t_max = glm::mix(t_invalid, t_max, valid_max);
auto t = glm::min(t_min, t_max);
auto is_min = glm::lessThanEqual(t_min, t_max);
Hit hit{};
hit.t = std::numeric_limits<float>::max();
hit.valid = false;
if (t.x < hit.t) {
hit.p = is_min.x ? p_min[0] : p_max[0];
hit.t = t.x;
hit.ng = {is_min.x ? -1.0f : 1.0f, 0.0f, 0.0f};
hit.valid = valid_min.x || valid_max.x;
}
if (t.y < hit.t) {
hit.p = is_min.y ? p_min[1] : p_max[1];
hit.t = t.y;
hit.ng = {0.0f, is_min.y ? -1.0f : 1.0f, 0.0f};
hit.valid = valid_min.y || valid_max.y;
}
if (t.z < hit.t) {
hit.p = is_min.z ? p_min[2] : p_max[2];
hit.t = t.z;
hit.ng = {0.0f, 0.0f, is_min.z ? -1.0f : 1.0f};
hit.valid = valid_min.z || valid_max.z;
}
return hit;
}
static constexpr auto m000 = 0b00000001u;
static constexpr auto m001 = 0b00000010u;
static constexpr auto m011 = 0b00000100u;
static constexpr auto m010 = 0b00001000u;
static constexpr auto m110 = 0b00010000u;
static constexpr auto m111 = 0b00100000u;
static constexpr auto m101 = 0b01000000u;
static constexpr auto m100 = 0b10000000u;
__constant__ uint32_t _node_m[]{m000, m001, m011, m010, m110, m111, m101, m100};
static constexpr auto d000 = float3{0.0f, 0.0f, 0.0f};
static constexpr auto d001 = float3{0.0f, 0.0f, 1.0f};
static constexpr auto d010 = float3{0.0f, 1.0f, 0.0f};
static constexpr auto d011 = float3{0.0f, 1.0f, 1.0f};
static constexpr auto d100 = float3{1.0f, 0.0f, 0.0f};
static constexpr auto d101 = float3{1.0f, 0.0f, 1.0f};
static constexpr auto d110 = float3{1.0f, 1.0f, 0.0f};
static constexpr auto d111 = float3{1.0f, 1.0f, 1.0f};
__constant__ float3 _node_d[]{d000, d001, d011, d010, d110, d111, d101, d100};
__global__ void octree_trace_closest(const CUDAOctree::Node *nodes, uint32_t resolution, const Ray *rays, Hit *hits, uint32_t w, uint32_t h) {
auto x = threadIdx.x + blockIdx.x * blockDim.x;
auto y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= w || y >= h) { return; }
auto tid = x + y * w;
Hit closest{};
closest.t = std::numeric_limits<float>::max();
closest.valid = false;
if (nodes[0].empty()) {
hits[tid] = closest;
return;
}
struct alignas(16) TraceContext {
glm::vec3 o;
float r;
uint32_t index;
float t;
};
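    // per-thread explicit stack: the octree is traversed iteratively rather than recursively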
static constexpr auto stack_size = 32u;
TraceContext stack[stack_size];
auto sp = 0u;
auto ray = rays[tid];
auto add_node = [&sp, ray, &stack, &closest, nodes](auto index, auto o, auto r) noexcept {
if (auto hit = _intersect_box(ray, o, r); hit.valid && hit.t < closest.t) {
auto node = nodes[index];
if (node.full()) {
closest = hit;
} else if (r == 2.0f) {
#pragma unroll
for (auto i = 0u; i < 8u; i++) {
if ((node.child_masks() & _node_m[i])) {
auto d = _node_d[i];
if (auto child_hit = _intersect_box(ray, o + glm::vec3{d.x, d.y, d.z}, 1.0f);
child_hit.valid && child_hit.t < closest.t) {
closest = child_hit;
}
}
}
} else {
if (sp == stack_size) { printf("warning: stack overflows\n"); }
stack[sp++] = {o, r, index, hit.t};
}
}
};
add_node(0u, glm::vec3{}, static_cast<float>(resolution));
while (sp != 0u) {
auto ctx = stack[--sp];
if (ctx.t >= closest.t) { continue; }
auto node = nodes[ctx.index];
auto half_r = ctx.r * 0.5f;
#pragma unroll
for (auto i = 0u; i < 8u; i++) {
if (node.child_masks() & _node_m[i]) {
auto d = _node_d[i];
add_node(ctx.index + node.child_offset() + i, ctx.o + glm::vec3{d.x, d.y, d.z} * half_r, half_r);
}
}
}
hits[tid] = closest;
}
void CUDAOctree::trace_closest(const Ray *rays, Hit *hits, uint32_t width, uint32_t height) const noexcept {
static constexpr auto block_size = 16u;
auto blocks_x = (width + block_size - 1u) / block_size;
auto blocks_y = (height + block_size - 1u) / block_size;
hipLaunchKernelGGL(( octree_trace_closest), dim3(dim3(blocks_x, blocks_y)), dim3(dim3(block_size, block_size)), 0, 0, _nodes->data(), _resolution, rays, hits, width, height);
}
| c1f2810f484e892aaa7beae8738785f38db73683.cu | //
// Created by Mike on 6/16/2021.
//
#include <glm/gtx/component_wise.hpp>
#include <gpu/cuda_octree.h>
[[nodiscard]] SV_XPU inline auto _intersect_box(Ray ray, glm::vec3 bbox_min, float bbox_r) noexcept {
auto bbox_max = bbox_min + bbox_r;
auto t_min = (bbox_min - ray.o) / ray.d;
auto t_max = (bbox_max - ray.o) / ray.d;
auto o_mat = glm::mat3{ray.o, ray.o, ray.o};
glm::vec3 p_min[]{ray.o + t_min.x * ray.d, ray.o + t_min.y * ray.d, ray.o + t_min.z * ray.d};
glm::vec3 p_max[]{ray.o + t_max.x * ray.d, ray.o + t_max.y * ray.d, ray.o + t_max.z * ray.d};
auto valid = [ray, bbox_min, bbox_max](auto t, auto p) noexcept {
return glm::not_(glm::isnan(t))
&& glm::greaterThanEqual(t, glm::vec3{ray.t_min})
&& glm::lessThanEqual(t, glm::vec3{ray.t_max})
&& glm::bvec3{p[0].y >= bbox_min.y && p[0].y <= bbox_max.y
&& p[0].z >= bbox_min.z && p[0].z <= bbox_max.z,
p[1].z >= bbox_min.z && p[1].z <= bbox_max.z
&& p[1].x >= bbox_min.x && p[1].x <= bbox_max.x,
p[2].x >= bbox_min.x && p[2].x <= bbox_max.x
&& p[2].y >= bbox_min.y && p[2].y <= bbox_max.y};
};
auto t_invalid = glm::vec3{std::numeric_limits<float>::max()};
auto valid_min = valid(t_min, p_min);
auto valid_max = valid(t_max, p_max);
t_min = glm::mix(t_invalid, t_min, valid_min);
t_max = glm::mix(t_invalid, t_max, valid_max);
auto t = glm::min(t_min, t_max);
auto is_min = glm::lessThanEqual(t_min, t_max);
Hit hit{};
hit.t = std::numeric_limits<float>::max();
hit.valid = false;
if (t.x < hit.t) {
hit.p = is_min.x ? p_min[0] : p_max[0];
hit.t = t.x;
hit.ng = {is_min.x ? -1.0f : 1.0f, 0.0f, 0.0f};
hit.valid = valid_min.x || valid_max.x;
}
if (t.y < hit.t) {
hit.p = is_min.y ? p_min[1] : p_max[1];
hit.t = t.y;
hit.ng = {0.0f, is_min.y ? -1.0f : 1.0f, 0.0f};
hit.valid = valid_min.y || valid_max.y;
}
if (t.z < hit.t) {
hit.p = is_min.z ? p_min[2] : p_max[2];
hit.t = t.z;
hit.ng = {0.0f, 0.0f, is_min.z ? -1.0f : 1.0f};
hit.valid = valid_min.z || valid_max.z;
}
return hit;
}
static constexpr auto m000 = 0b00000001u;
static constexpr auto m001 = 0b00000010u;
static constexpr auto m011 = 0b00000100u;
static constexpr auto m010 = 0b00001000u;
static constexpr auto m110 = 0b00010000u;
static constexpr auto m111 = 0b00100000u;
static constexpr auto m101 = 0b01000000u;
static constexpr auto m100 = 0b10000000u;
__constant__ uint32_t _node_m[]{m000, m001, m011, m010, m110, m111, m101, m100};
static constexpr auto d000 = float3{0.0f, 0.0f, 0.0f};
static constexpr auto d001 = float3{0.0f, 0.0f, 1.0f};
static constexpr auto d010 = float3{0.0f, 1.0f, 0.0f};
static constexpr auto d011 = float3{0.0f, 1.0f, 1.0f};
static constexpr auto d100 = float3{1.0f, 0.0f, 0.0f};
static constexpr auto d101 = float3{1.0f, 0.0f, 1.0f};
static constexpr auto d110 = float3{1.0f, 1.0f, 0.0f};
static constexpr auto d111 = float3{1.0f, 1.0f, 1.0f};
__constant__ float3 _node_d[]{d000, d001, d011, d010, d110, d111, d101, d100};
__global__ void octree_trace_closest(const CUDAOctree::Node *nodes, uint32_t resolution, const Ray *rays, Hit *hits, uint32_t w, uint32_t h) {
auto x = threadIdx.x + blockIdx.x * blockDim.x;
auto y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= w || y >= h) { return; }
auto tid = x + y * w;
Hit closest{};
closest.t = std::numeric_limits<float>::max();
closest.valid = false;
if (nodes[0].empty()) {
hits[tid] = closest;
return;
}
struct alignas(16) TraceContext {
glm::vec3 o;
float r;
uint32_t index;
float t;
};
static constexpr auto stack_size = 32u;
TraceContext stack[stack_size];
auto sp = 0u;
auto ray = rays[tid];
auto add_node = [&sp, ray, &stack, &closest, nodes](auto index, auto o, auto r) noexcept {
if (auto hit = _intersect_box(ray, o, r); hit.valid && hit.t < closest.t) {
auto node = nodes[index];
if (node.full()) {
closest = hit;
} else if (r == 2.0f) {
#pragma unroll
for (auto i = 0u; i < 8u; i++) {
if ((node.child_masks() & _node_m[i])) {
auto d = _node_d[i];
if (auto child_hit = _intersect_box(ray, o + glm::vec3{d.x, d.y, d.z}, 1.0f);
child_hit.valid && child_hit.t < closest.t) {
closest = child_hit;
}
}
}
} else {
if (sp == stack_size) { printf("warning: stack overflows\n"); }
stack[sp++] = {o, r, index, hit.t};
}
}
};
add_node(0u, glm::vec3{}, static_cast<float>(resolution));
while (sp != 0u) {
auto ctx = stack[--sp];
if (ctx.t >= closest.t) { continue; }
auto node = nodes[ctx.index];
auto half_r = ctx.r * 0.5f;
#pragma unroll
for (auto i = 0u; i < 8u; i++) {
if (node.child_masks() & _node_m[i]) {
auto d = _node_d[i];
add_node(ctx.index + node.child_offset() + i, ctx.o + glm::vec3{d.x, d.y, d.z} * half_r, half_r);
}
}
}
hits[tid] = closest;
}
void CUDAOctree::trace_closest(const Ray *rays, Hit *hits, uint32_t width, uint32_t height) const noexcept {
static constexpr auto block_size = 16u;
auto blocks_x = (width + block_size - 1u) / block_size;
auto blocks_y = (height + block_size - 1u) / block_size;
octree_trace_closest<<<dim3(blocks_x, blocks_y), dim3(block_size, block_size)>>>(_nodes->data(), _resolution, rays, hits, width, height);
}
|
da8eced988dbbb316a0cba9bdd707c8454247977.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe
{
template <typename Dtype>
void VerificationLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top)
{
const int count = bottom[0]->count();
caffe_gpu_sub(count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i)
{
if (static_cast<int>(bottom[2]->cpu_data()[i]) == static_cast<int>(bottom[3]->cpu_data()[i])) // similar pairs
loss += dist_sq_.cpu_data()[i];
else // dissimilar pairs
{
if (legacy_version)
loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
else
{
Dtype dist = ::max(margin - sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0));
loss += dist * dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels, const Dtype margin, const bool legacy_version,
const Dtype alpha, const Dtype *y0, const Dtype *y1, const Dtype *diff, const Dtype *dist_sq, Dtype *bottom_diff)
{
CUDA_KERNEL_LOOP(i, count)
{
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y0[n]) == static_cast<int>(y1[n])) // similar pairs
bottom_diff[i] = alpha * diff[i];
else // dissimilar pairs
{
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version)
{
mdist = (margin - dist_sq[n]);
beta = -alpha;
}
else
{
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0)
bottom_diff[i] = beta;
else
bottom_diff[i] = 0;
}
}
}
template <typename Dtype>
void VerificationLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom)
{
for (int i = 0; i < 2; ++i)
{
if (propagate_down[i])
{
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // label0
bottom[3]->gpu_data(), // label1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(VerificationLossLayer);
} // namespace caffe
| da8eced988dbbb316a0cba9bdd707c8454247977.cu |
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe
{
template <typename Dtype>
void VerificationLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top)
{
const int count = bottom[0]->count();
caffe_gpu_sub(count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i)
{
if (static_cast<int>(bottom[2]->cpu_data()[i]) == static_cast<int>(bottom[3]->cpu_data()[i])) // similar pairs
loss += dist_sq_.cpu_data()[i];
else // dissimilar pairs
{
if (legacy_version)
loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
else
{
Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0));
loss += dist * dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels, const Dtype margin, const bool legacy_version,
const Dtype alpha, const Dtype *y0, const Dtype *y1, const Dtype *diff, const Dtype *dist_sq, Dtype *bottom_diff)
{
CUDA_KERNEL_LOOP(i, count)
{
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y0[n]) == static_cast<int>(y1[n])) // similar pairs
bottom_diff[i] = alpha * diff[i];
else // dissimilar pairs
{
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version)
{
mdist = (margin - dist_sq[n]);
beta = -alpha;
}
else
{
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0)
bottom_diff[i] = beta;
else
bottom_diff[i] = 0;
}
}
}
template <typename Dtype>
void VerificationLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom)
{
for (int i = 0; i < 2; ++i)
{
if (propagate_down[i])
{
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // label0
bottom[3]->gpu_data(), // label1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(VerificationLossLayer);
} // namespace caffe
|
bff2ef7ff54c1ac97c5ff66efdaea632ed74e50e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// use constant memory to hold the vector
// the matrix stays in global memory
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
double err = 0.1;
__constant__ double con_b[8192];
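// con_b holds only half of the 16384-element vector, so main() copies and multiplies the two halves in separate passes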
__global__ void multi(double *A,double *C,const int Max,int i){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
//int idy=threadIdx.y+blockDim.y*blockIdx.y;
if(idx<Max){
int k=0;
double sum=0;
for(k=i*Max/2;k<(i+1)*Max/2;k++){
sum+=A[idx*Max+k]*con_b[k%(Max/2)];
}
C[idx]+=sum;
}
}
int main(){
printf("constant memory:\n");
double *A =(double *)malloc(Max * Max * sizeof(double)); //A
double b[Max]; //b
double *C =(double *)malloc(Max * sizeof(double)); //C
double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
int i,j;
for(i=0;i<Max;i++){
for(j=0;j<Max;j++){
A[i*Max+j]=i-0.1*j+1;
}
}
for(i=0;i<Max;i++){
b[i]=log(sqrt(i*i-i+2));
C[i]=0.0;
}
double *A_d,*C_d;
hipMalloc((void **)&A_d,Max * Max * sizeof(double));
hipMalloc((void **)&C_d,Max *sizeof(double));
clock_t start,end;
start=clock();
hipMemcpy(A_d, A,Max*Max*sizeof(double),hipMemcpyHostToDevice);
//hipMemcpyToSymbol(con_b, b, sizeof(double) * Max);
hipMemcpy(C_d, C,Max * sizeof(double), hipMemcpyHostToDevice);
dim3 block(width,1);
dim3 grid(Max/block.x, 1);
for(int i=0;i<2;i++){
hipMemcpyToSymbol(con_b, &b[i*Max/2], sizeof(double) * Max/2);
hipLaunchKernelGGL(( multi), dim3(grid),dim3(block), 0, 0, A_d,C_d,Max,i);
}
hipMemcpy(C, C_d, Max * sizeof(double), hipMemcpyDeviceToHost);
end=clock();
double time=(end-start)*1000/CLOCKS_PER_SEC;
//cpu:
clock_t start_c,end_c;
start_c=clock();
for (int i = 0; i < Max; ++i){
for (int j = 0; j < Max; ++j)
{
test_c[i]+=A[i*Max+j]*b[j];
}
}
end_c=clock();
double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
printf("GPU TIME:%lf ms\n",time);
printf("CPU TIME:%lf ms\n",time_C);
//check result:
bool flag = true;
for (int i = 0; i < Max; ++i){
double a=test_c[i];
double b=C[i];
if (abs(a-b)>err)
{
printf("cpu:%lf gpu:%lf\n",a,b);
flag = false;
}
}
if (flag == true)
printf("result correct\n");
else{
printf("resul wrong\n");
}
hipFree(A_d);
hipFree(C_d);
free(A);
free(test_c);
free(C);
}
	 | bff2ef7ff54c1ac97c5ff66efdaea632ed74e50e.cu	 | // use constant memory to hold the vector
//global memory
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
double err = 0.1;
__constant__ double con_b[8192];
__global__ void multi(double *A,double *C,const int Max,int i){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
//int idy=threadIdx.y+blockDim.y*blockIdx.y;
if(idx<Max){
int k=0;
double sum=0;
for(k=i*Max/2;k<(i+1)*Max/2;k++){
sum+=A[idx*Max+k]*con_b[k%(Max/2)];
}
C[idx]+=sum;
}
}
int main(){
printf("使用constant memory存放向量:\n");
double *A =(double *)malloc(Max * Max * sizeof(double)); //A
double b[Max]; //b
double *C =(double *)malloc(Max * sizeof(double)); //C
double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
int i,j;
for(i=0;i<Max;i++){
for(j=0;j<Max;j++){
A[i*Max+j]=i-0.1*j+1;
}
}
for(i=0;i<Max;i++){
b[i]=log(sqrt(i*i-i+2));
C[i]=0.0;
}
double *A_d,*C_d;
cudaMalloc((void **)&A_d,Max * Max * sizeof(double));
cudaMalloc((void **)&C_d,Max *sizeof(double));
clock_t start,end;
start=clock();
cudaMemcpy(A_d, A,Max*Max*sizeof(double),cudaMemcpyHostToDevice);
//cudaMemcpyToSymbol(con_b, b, sizeof(double) * Max);
cudaMemcpy(C_d, C,Max * sizeof(double), cudaMemcpyHostToDevice);
dim3 block(width,1);
dim3 grid(Max/block.x, 1);
for(int i=0;i<2;i++){
cudaMemcpyToSymbol(con_b, &b[i*Max/2], sizeof(double) * Max/2);
multi<<<grid,block>>>(A_d,C_d,Max,i);
}
cudaMemcpy(C, C_d, Max * sizeof(double), cudaMemcpyDeviceToHost);
end=clock();
double time=(end-start)*1000/CLOCKS_PER_SEC;
//cpu:
clock_t start_c,end_c;
start_c=clock();
for (int i = 0; i < Max; ++i){
for (int j = 0; j < Max; ++j)
{
test_c[i]+=A[i*Max+j]*b[j];
}
}
end_c=clock();
double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
printf("GPU TIME:%lf ms\n",time);
printf("CPU TIME:%lf ms\n",time_C);
//check result:
bool flag = true;
for (int i = 0; i < Max; ++i){
double a=test_c[i];
double b=C[i];
if (abs(a-b)>err)
{
printf("cpu:%lf gpu:%lf\n",a,b);
flag = false;
}
}
if (flag == true)
printf("result correct\n");
else{
printf("resul wrong\n");
}
cudaFree(A_d);
cudaFree(C_d);
free(A);
free(test_c);
free(C);
}
|
d820569da6ca19b79ea714f76649daee7279c792.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef wip
__global__ void halfy(const int N, const double *data, double *out)
{
#ifdef shared
extern __shared__ double in[];
#endif
// over y=1:n for fixed x from 1..2N
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<2*N&&y<N) {
#ifdef shared
int step=min(N,blockDim.y);
int inpos=x*2*N + y;
int sharepos=threadIdx.y*2+1;
if (threadIdx.y==0)in[0]=((blockIdx.y>0)?data[inpos-1]:0);
in[1+threadIdx.y]=data[inpos];
in[1+threadIdx.y+step]=data[inpos+step];
__syncthreads();
double prev=in[sharepos-1];
double center=in[sharepos];
double next=in[sharepos+1];
#else
int inpos=x*2*N + 2 * y;
double prev= y>0?data[inpos-1]:0;
double center = data[inpos];
double next = data[inpos+1];
#endif
out[x*N + y] =(prev+2*center+next) /((y>0)?4:3);
}
}
__global__ void halfx(const int N, const double *data, double *out)
{
// over x=1:n for fixed y from 1..n
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N) {
double prev= x>0?data[(x*2-1)*N + y]:0;
double center = data[x*2*N + y];
double next = data[(x*2+1)*N + y];
out[x*N + y] = (prev+2*center+next)/((x>0)+3);
// out[x*N + y]=prev;
}
}
#endif
__global__ void halfimage(const int N, const double *data, double *out)
{
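    // downsample the 2N x 2N input to N x N with a [1 2 1] x [1 2 1] weighted average centred at (2x, 2y); weights are renormalized at the low image borders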
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N) {
const int N2=2*N;
const int x2=x*2;
const int y2=y*2;
double left=0;
if (x>0){
left=(2*data[(x2-1)*N2+y2]+data[(x2-1)*N2+y2+1]);
if (y>0)left+=data[(x2-1)*N2+y2-1];
}
if (y>0)left=left/4;else left=left/3;
double center=(2*data[x2*N2+y2]+data[x2*N2+y2+1]);
if (y>0) center+=data[x2*N2+y2-1];
if (y>0) center=center/4; else center=center/3;
double right=(2*data[(x2+1)*N2+y2]+data[(x2+1)*N2+y2+1]);
if (y>0) right+=data[(x2+1)*N2+y2-1];
if (y>0) right=right/4;else right=right/3;
out[x*N + y]=(left+2*center+right)/((x>0)?4:3);
}
} | d820569da6ca19b79ea714f76649daee7279c792.cu | #ifdef wip
__global__ void halfy(const int N, const double *data, double *out)
{
#ifdef shared
extern __shared__ double in[];
#endif
// over y=1:n for fixed x from 1..2N
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<2*N&&y<N) {
#ifdef shared
int step=min(N,blockDim.y);
int inpos=x*2*N + y;
int sharepos=threadIdx.y*2+1;
if (threadIdx.y==0)in[0]=((blockIdx.y>0)?data[inpos-1]:0);
in[1+threadIdx.y]=data[inpos];
in[1+threadIdx.y+step]=data[inpos+step];
__syncthreads();
double prev=in[sharepos-1];
double center=in[sharepos];
double next=in[sharepos+1];
#else
int inpos=x*2*N + 2 * y;
double prev= y>0?data[inpos-1]:0;
double center = data[inpos];
double next = data[inpos+1];
#endif
out[x*N + y] =(prev+2*center+next) /((y>0)?4:3);
}
}
__global__ void halfx(const int N, const double *data, double *out)
{
// over x=1:n for fixed y from 1..n
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N) {
double prev= x>0?data[(x*2-1)*N + y]:0;
double center = data[x*2*N + y];
double next = data[(x*2+1)*N + y];
out[x*N + y] = (prev+2*center+next)/((x>0)+3);
// out[x*N + y]=prev;
}
}
#endif
__global__ void halfimage(const int N, const double *data, double *out)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N) {
const int N2=2*N;
const int x2=x*2;
const int y2=y*2;
double left=0;
if (x>0){
left=(2*data[(x2-1)*N2+y2]+data[(x2-1)*N2+y2+1]);
if (y>0)left+=data[(x2-1)*N2+y2-1];
}
if (y>0)left=left/4;else left=left/3;
double center=(2*data[x2*N2+y2]+data[x2*N2+y2+1]);
if (y>0) center+=data[x2*N2+y2-1];
if (y>0) center=center/4; else center=center/3;
double right=(2*data[(x2+1)*N2+y2]+data[(x2+1)*N2+y2+1]);
if (y>0) right+=data[(x2+1)*N2+y2-1];
if (y>0) right=right/4;else right=right/3;
out[x*N + y]=(left+2*center+right)/((x>0)?4:3);
}
} |
922084b15cc4ff2a608037cea85066735224465c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <Windows.h>
__global__ void matrixMultiply(hipPitchedPtr matrix1, hipExtent extent){
//printf("matrixMultiply is called from: %d, %d", threadIdx.x, threadIdx.y);
char* devPtr = (char*)matrix1.ptr;
size_t pitch = matrix1.pitch;
size_t slicePitch = pitch*extent.height;
int x = threadIdx.x;
int y = threadIdx.y;
int z = threadIdx.z;
char* slice = devPtr + z * slicePitch;
float* row = (float*)(slice + y * pitch);
printf("%d,%d,%d : %f\n", x, y, z, row[x]);
}
int main(void){
static const size_t ROWNUM = 10;
static const size_t COLNUM = 5;
static const size_t Z = 2;
float* h_data = new float[ROWNUM*COLNUM*Z];
for (int i = 0; i < ROWNUM*COLNUM*Z; i++){
h_data[i] = (float)i;
}
hipPitchedPtr h_dataPtr = make_hipPitchedPtr(h_data, ROWNUM*sizeof(float), ROWNUM, COLNUM);
hipPitchedPtr d_matrix1Ptr;
hipExtent extent = make_hipExtent(ROWNUM*sizeof(float), COLNUM, Z);
hipMalloc3D(&d_matrix1Ptr, extent);
printf("%d\n", d_matrix1Ptr.pitch);
hipMemcpy3DParms params = { 0 };
params.srcPtr = h_dataPtr;
params.dstPtr = d_matrix1Ptr;
params.extent = extent;
params.kind = hipMemcpyHostToDevice;
hipMemcpy3D(¶ms);
printf("%s\n", hipGetErrorString(hipGetLastError()));
dim3 dimen = dim3(ROWNUM, COLNUM, Z);
matrixMultiply << <1, dimen >> >(d_matrix1Ptr, extent);
delete[] h_data;
hipFree(d_matrix1Ptr.ptr);
system("pause");
return 0;
} | 922084b15cc4ff2a608037cea85066735224465c.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <Windows.h>
__global__ void matrixMultiply(cudaPitchedPtr matrix1, cudaExtent extent){
//printf("matrixMultiply is called from: %d, %d", threadIdx.x, threadIdx.y);
char* devPtr = (char*)matrix1.ptr;
size_t pitch = matrix1.pitch;
size_t slicePitch = pitch*extent.height;
int x = threadIdx.x;
int y = threadIdx.y;
int z = threadIdx.z;
char* slice = devPtr + z * slicePitch;
float* row = (float*)(slice + y * pitch);
printf("%d,%d,%d : %f\n", x, y, z, row[x]);
}
int main(void){
static const size_t ROWNUM = 10;
static const size_t COLNUM = 5;
static const size_t Z = 2;
float* h_data = new float[ROWNUM*COLNUM*Z];
for (int i = 0; i < ROWNUM*COLNUM*Z; i++){
h_data[i] = (float)i;
}
cudaPitchedPtr h_dataPtr = make_cudaPitchedPtr(h_data, ROWNUM*sizeof(float), ROWNUM, COLNUM);
cudaPitchedPtr d_matrix1Ptr;
cudaExtent extent = make_cudaExtent(ROWNUM*sizeof(float), COLNUM, Z);
cudaMalloc3D(&d_matrix1Ptr, extent);
printf("%d\n", d_matrix1Ptr.pitch);
cudaMemcpy3DParms params = { 0 };
params.srcPtr = h_dataPtr;
params.dstPtr = d_matrix1Ptr;
params.extent = extent;
params.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(¶ms);
printf("%s\n", cudaGetErrorString(cudaGetLastError()));
dim3 dimen = dim3(ROWNUM, COLNUM, Z);
matrixMultiply << <1, dimen >> >(d_matrix1Ptr, extent);
delete[] h_data;
cudaFree(d_matrix1Ptr.ptr);
system("pause");
return 0;
} |
1b7c3fac82d5e89258e6008be231ec61ca59ecf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/top_k_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/top_k_function_cuda.h"
namespace phi {
template <typename T, typename Context>
void TopkGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& indices,
const DenseTensor& out_grad,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* x_grad) {
const auto& in_dims = x.dims();
const auto& out_dims = indices.dims();
int k = k_scalar.to<int>();
  // get the real axis and k
if (axis < 0) {
axis += in_dims.size();
}
const int& raw_height = in_dims[axis];
// allocate the cuda memory for the x_grad
T* x_grad_data = dev_ctx.template Alloc<T>(x_grad);
const T* out_grad_data = out_grad.data<T>();
const int64_t* indices_data = indices.data<int64_t>();
if (in_dims.size() == 0) {
phi::Copy<Context>(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad);
return;
}
int pre, n, post;
phi::funcs::GetDims(in_dims, axis, &pre, &n, &post);
  // calculate the block and grid num
auto ComputeBlockSize = [](int col) {
if (col > 512)
return 1024;
else if (col > 256 && col <= 512)
return 512;
else if (col > 128 && col <= 256)
return 256;
else if (col > 64 && col <= 128)
return 128;
else
return 64;
};
int block_size = ComputeBlockSize(post * k);
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(((max_threads - 1) / block_size + 1), 1);
int grid_size = ::min(max_blocks, pre);
  // launch the cuda kernel to assign the grad
hipLaunchKernelGGL(( phi::funcs::AssignGradWithAxis<T>)
, dim3(grid_size), dim3(block_size), 64 * 4, dev_ctx.stream(),
out_grad_data, indices_data, x_grad_data, pre, post, n, k);
}
} // namespace phi
PD_REGISTER_KERNEL(topk_grad,
GPU,
ALL_LAYOUT,
phi::TopkGradKernel,
float,
double,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| 1b7c3fac82d5e89258e6008be231ec61ca59ecf4.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/top_k_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/top_k_function_cuda.h"
namespace phi {
template <typename T, typename Context>
void TopkGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& indices,
const DenseTensor& out_grad,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* x_grad) {
const auto& in_dims = x.dims();
const auto& out_dims = indices.dims();
int k = k_scalar.to<int>();
// get the real the axis and the k
if (axis < 0) {
axis += in_dims.size();
}
const int& raw_height = in_dims[axis];
// allocate the cuda memory for the x_grad
T* x_grad_data = dev_ctx.template Alloc<T>(x_grad);
const T* out_grad_data = out_grad.data<T>();
const int64_t* indices_data = indices.data<int64_t>();
if (in_dims.size() == 0) {
phi::Copy<Context>(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad);
return;
}
int pre, n, post;
phi::funcs::GetDims(in_dims, axis, &pre, &n, &post);
  // calculate the block and grid num
auto ComputeBlockSize = [](int col) {
if (col > 512)
return 1024;
else if (col > 256 && col <= 512)
return 512;
else if (col > 128 && col <= 256)
return 256;
else if (col > 64 && col <= 128)
return 128;
else
return 64;
};
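  // Launch configuration (below): the block size grows with post * k in power-of-two
  // steps from 64 up to 1024, and the grid is capped by both `pre` and the number of
  // blocks that fit the device's maximum physical thread count.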
int block_size = ComputeBlockSize(post * k);
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(((max_threads - 1) / block_size + 1), 1);
int grid_size = std::min(max_blocks, pre);
  // launch the cuda kernel to assign the grad
phi::funcs::AssignGradWithAxis<T>
<<<grid_size, block_size, 64 * 4, dev_ctx.stream()>>>(
out_grad_data, indices_data, x_grad_data, pre, post, n, k);
}
} // namespace phi
PD_REGISTER_KERNEL(topk_grad,
GPU,
ALL_LAYOUT,
phi::TopkGradKernel,
float,
double,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
008c554dc7b40780509d4c55e60cf512ca37ff93.hip | // !!! This is a file automatically generated by hipify!!!
//////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round 2: n-body simulation
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <hip/hip_runtime.h>
using namespace std;
#define BDIMX 32
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
std::string team="dns";
std::string author_1="Gao_Chen";
std::string author_2="Nicolas_Flores";
std::string author_3="Shikhar_Sinha";
};
//////////////////////////////////////////////////////////////////////////
////Here is a sample function implemented on CPU for n-body simulation.
__host__ void N_Body_Simulation_CPU_Poorman(double* pos_x,double* pos_y,double* pos_z, ////position array
double* vel_x,double* vel_y,double* vel_z, ////velocity array
double* acl_x,double* acl_y,double* acl_z, ////acceleration array
const double* mass, ////mass array
const int n, ////number of particles
const double dt, ////timestep
const double epsilon_squared) ////epsilon to avoid 0-denominator
{
////Step 1: set particle accelerations to be zero
memset(acl_x,0x00,sizeof(double)*n);
memset(acl_y,0x00,sizeof(double)*n);
memset(acl_z,0x00,sizeof(double)*n);
////Step 2: traverse all particle pairs and accumulate gravitational forces for each particle from pairwise interactions
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
////skip calculating force for itself
if(i==j) continue;
////r_ij=x_j-x_i
double rx=pos_x[j]-pos_x[i];
double ry=pos_y[j]-pos_y[i];
double rz=pos_z[j]-pos_z[i];
////a_ij=m_j*r_ij/(r+epsilon)^3,
////noticing that we ignore the gravitational coefficient (assuming G=1)
double dis_squared=rx*rx+ry*ry+rz*rz;
double one_over_dis_cube=1.0/pow(sqrt(dis_squared+epsilon_squared),3);
double ax=mass[j]*rx*one_over_dis_cube;
double ay=mass[j]*ry*one_over_dis_cube;
double az=mass[j]*rz*one_over_dis_cube;
////accumulate the force to the particle
acl_x[i]+=ax;
acl_y[i]+=ay;
acl_z[i]+=az;
}
}
////Step 3: explicit time integration to update the velocity and position of each particle
for(int i=0;i<n;i++){
////v_{t+1}=v_{t}+a_{t}*dt
vel_x[i]+=acl_x[i]*dt;
vel_y[i]+=acl_y[i]*dt;
vel_z[i]+=acl_z[i]*dt;
////x_{t+1}=x_{t}+v_{t}*dt
pos_x[i]+=vel_x[i]*dt;
pos_y[i]+=vel_y[i]*dt;
pos_z[i]+=vel_z[i]*dt;
}
}
//////////////////////////////////////////////////////////////////////////
////TODO 1: your GPU variables and functions start here
__global__ void N_BODY_GPU(
double* pos_x,double* pos_y,double* pos_z, ////position array
double* vel_x,double* vel_y,double* vel_z, ////velocity array
double* acl_x,double* acl_y,double* acl_z, ////acceleration array
double* pos_x_2,double* pos_y_2,double* pos_z_2, ////position array
double* vel_x_2,double* vel_y_2,double* vel_z_2, ////velocity array
double* acl_x_2,double* acl_y_2,double* acl_z_2,
const double* mass, ////mass array
const int* n, ////number of particles
const double* dt, ////timestep
const double* epsilon_squared) ////epsilon to avoid 0-denominator
{
__shared__ double x[BDIMX];
__shared__ double y[BDIMX];
__shared__ double z[BDIMX];
__shared__ double m[BDIMX];
double new_acl_x = 0;
double new_acl_y = 0;
double new_acl_z = 0;
double eps2 = epsilon_squared[0];
int bnum = blockDim.x * blockIdx.x + threadIdx.x; // the particle index
double own_pos_x = pos_x[bnum];
double own_pos_y = pos_y[bnum];
double own_pos_z = pos_z[bnum];
// need to load in values n/p times so we do:
for(int i = 0; i < n[0] / BDIMX; i++)
{
x[threadIdx.x] = pos_x[i * BDIMX + threadIdx.x];
y[threadIdx.x] = pos_y[i * BDIMX + threadIdx.x];
z[threadIdx.x] = pos_z[i * BDIMX + threadIdx.x];
m[threadIdx.x] = mass[i * BDIMX + threadIdx.x];
__syncthreads();
// do the calculation: for each of the p bodies loaded in, calculate the stuff
for(int j = 0; j < BDIMX; j+=1)
{
double r_x = x[j] - own_pos_x;
double r_y = y[j] - own_pos_y;
double r_z = z[j] - own_pos_z;
double dist_sqr = r_x * r_x + r_y * r_y + r_z * r_z + eps2;
double dist_sixth = dist_sqr * dist_sqr * dist_sqr;
double m_over_dis_cube = m[j]/sqrt(dist_sixth);
new_acl_x += r_x * m_over_dis_cube;
new_acl_y += r_y * m_over_dis_cube;
new_acl_z += r_z * m_over_dis_cube;
}
__syncthreads();
}
acl_x_2[bnum] = new_acl_x;
acl_y_2[bnum] = new_acl_y;
acl_z_2[bnum] = new_acl_z;
double new_v_x = vel_x[bnum] + new_acl_x * dt[0];
double new_v_y = vel_y[bnum] + new_acl_y * dt[0];
double new_v_z = vel_z[bnum] + new_acl_z * dt[0];
pos_x_2[bnum] = own_pos_x + new_v_x * dt[0];
pos_y_2[bnum] = own_pos_y + new_v_y * dt[0];
pos_z_2[bnum] = own_pos_z + new_v_z * dt[0];
vel_x_2[bnum] = new_v_x;
vel_y_2[bnum] = new_v_y;
vel_z_2[bnum] = new_v_z;
}
////Your implementations end here
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////Test function for n-body simulator
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Please do not change the values below
const double dt=0.001; ////time step
const int time_step_num=10; ////number of time steps
const double epsilon=1e-2; ////epsilon added in the denominator to avoid 0-division when calculating the gravitational force
const double epsilon_squared=epsilon*epsilon; ////epsilon squared
////We use grid_size=4 to help you debug your code, change it to a bigger number (e.g., 16, 32, etc.) to test the performance of your GPU code
const unsigned int grid_size=16; ////assuming particles are initialized on a background grid
const unsigned int particle_n=pow(grid_size,3); ////assuming each grid cell has one particle at the beginning
__host__ void Test_N_Body_Simulation()
{
////initialize position, velocity, acceleration, and mass
double* pos_x=new double[particle_n];
double* pos_y=new double[particle_n];
double* pos_z=new double[particle_n];
////initialize particle positions as the cell centers on a background grid
double dx=1.0/(double)grid_size;
for(unsigned int k=0;k<grid_size;k++){
for(unsigned int j=0;j<grid_size;j++){
for(unsigned int i=0;i<grid_size;i++){
unsigned int index=k*grid_size*grid_size+j*grid_size+i;
pos_x[index]=dx*(double)i;
pos_y[index]=dx*(double)j;
pos_z[index]=dx*(double)k;
}
}
}
double* vel_x=new double[particle_n];
memset(vel_x,0x00,particle_n*sizeof(double));
double* vel_y=new double[particle_n];
memset(vel_y,0x00,particle_n*sizeof(double));
double* vel_z=new double[particle_n];
memset(vel_z,0x00,particle_n*sizeof(double));
double* acl_x=new double[particle_n];
memset(acl_x,0x00,particle_n*sizeof(double));
double* acl_y=new double[particle_n];
memset(acl_y,0x00,particle_n*sizeof(double));
double* acl_z=new double[particle_n];
memset(acl_z,0x00,particle_n*sizeof(double));
double* mass=new double[particle_n];
for(int i=0;i<particle_n;i++){
mass[i]=100.0;
}
//////////////////////////////////////////////////////////////////////////
////Default implementation: n-body simulation on CPU
////Comment the CPU implementation out when you test large-scale examples
auto cpu_start=chrono::system_clock::now();
cout<<"Total number of particles: "<<particle_n<<endl;
cout<<"Tracking the motion of particle "<<particle_n/2<<endl;
// for(int i=0;i<time_step_num;i++){
// N_Body_Simulation_CPU_Poorman(pos_x,pos_y,pos_z,vel_x,vel_y,vel_z,acl_x,acl_y,acl_z,mass,particle_n,dt,epsilon_squared);
// cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
// }
auto cpu_end=chrono::system_clock::now();
chrono::duration<double> cpu_time=cpu_end-cpu_start;
cout<<"CPU runtime: "<<cpu_time.count()*1000.<<" ms."<<endl;
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////Your implementation: n-body simulator on GPU
//////////////////////////////////////////////////////////////////////////
////TODO 2: Your GPU functions are called here
////Requirement: You need to copy data from the CPU arrays, conduct computations on the GPU, and copy the values back from GPU to CPU
////The final positions should be stored in the same place as the CPU n-body function, i.e., pos_x, pos_y, pos_z
////The correctness of your simulation will be evaluated by comparing the results (positions) with the results calculated by the default CPU implementations
//////////////////////////////////////////////////////////////////////////
for(unsigned int k=0;k<grid_size;k++){
for(unsigned int j=0;j<grid_size;j++){
for(unsigned int i=0;i<grid_size;i++){
unsigned int index=k*grid_size*grid_size+j*grid_size+i;
pos_x[index]=dx*(double)i;
pos_y[index]=dx*(double)j;
pos_z[index]=dx*(double)k;
}
}
}
memset(vel_x,0x00,particle_n*sizeof(double));
memset(vel_y,0x00,particle_n*sizeof(double));
memset(vel_z,0x00,particle_n*sizeof(double));
memset(acl_x,0x00,particle_n*sizeof(double));
memset(acl_y,0x00,particle_n*sizeof(double));
memset(acl_z,0x00,particle_n*sizeof(double));
hipEvent_t start,end;
hipEventCreate(&start);
hipEventCreate(&end);
float gpu_time=0.0f;
hipDeviceSynchronize();
hipEventRecord(start);
double *pos_x_dev;
double *pos_y_dev;
double *pos_z_dev;
double *vel_x_dev;
double *vel_y_dev;
double *vel_z_dev;
double *acl_x_dev;
double *acl_y_dev;
double *acl_z_dev;
double *pos_x_dev_2;
double *pos_y_dev_2;
double *pos_z_dev_2;
double *vel_x_dev_2;
double *vel_y_dev_2;
double *vel_z_dev_2;
double *acl_x_dev_2;
double *acl_y_dev_2;
double *acl_z_dev_2;
double *m_dev;
double *dt_dev;
double *epsilon_squared_dev;
int *n_dev;
hipMalloc((void**)&pos_x_dev, particle_n * sizeof(double));
hipMalloc((void**)&pos_y_dev, particle_n * sizeof(double));
hipMalloc((void**)&pos_z_dev, particle_n * sizeof(double));
hipMalloc((void**)&vel_x_dev, particle_n * sizeof(double));
hipMalloc((void**)&vel_y_dev, particle_n * sizeof(double));
hipMalloc((void**)&vel_z_dev, particle_n * sizeof(double));
hipMalloc((void**)&acl_x_dev, particle_n * sizeof(double));
hipMalloc((void**)&acl_y_dev, particle_n * sizeof(double));
hipMalloc((void**)&acl_z_dev, particle_n * sizeof(double));
hipMalloc((void**)&pos_x_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&pos_y_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&pos_z_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&vel_x_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&vel_y_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&vel_z_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&acl_x_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&acl_y_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&acl_z_dev_2, particle_n * sizeof(double));
hipMalloc((void**)&m_dev, particle_n * sizeof(double));
hipMalloc((void**)&dt_dev, sizeof(double));
hipMalloc((void**)&epsilon_squared_dev, sizeof(double));
hipMalloc((void**)&n_dev, sizeof(int));
hipMemcpy(pos_x_dev, pos_x, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(pos_y_dev, pos_y, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(pos_z_dev, pos_z, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(vel_x_dev, vel_x, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(vel_y_dev, vel_y, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(vel_z_dev, vel_z, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(acl_x_dev, acl_x, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(acl_y_dev, acl_y, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(acl_z_dev, acl_z, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemset(pos_x_dev_2, 0, particle_n * sizeof(double));
hipMemset(pos_y_dev_2, 0, particle_n * sizeof(double));
hipMemset(pos_z_dev_2, 0, particle_n * sizeof(double));
hipMemset(vel_x_dev_2, 0, particle_n * sizeof(double));
hipMemset(vel_y_dev_2, 0, particle_n * sizeof(double));
hipMemset(vel_z_dev_2, 0, particle_n * sizeof(double));
hipMemset(acl_x_dev_2, 0, particle_n * sizeof(double));
hipMemset(acl_y_dev_2, 0, particle_n * sizeof(double));
hipMemset(acl_z_dev_2, 0, particle_n * sizeof(double));
hipMemcpy(m_dev, mass, particle_n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dt_dev, &dt, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(epsilon_squared_dev,&epsilon_squared, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(n_dev, &particle_n, sizeof(int), hipMemcpyHostToDevice);
cout<<"Total number of particles: "<<particle_n<<endl;
cout<<"Tracking the motion of particle "<<particle_n/2<<endl;
for (int i = 0;i < time_step_num; i++)
{
if(i % 2 == 0)
{
hipLaunchKernelGGL(( N_BODY_GPU), dim3(dim3(particle_n/BDIMX)), dim3(BDIMX), 0, 0,
pos_x_dev, pos_y_dev, pos_z_dev,
vel_x_dev, vel_y_dev, vel_z_dev,
acl_x_dev, acl_y_dev, acl_z_dev,
pos_x_dev_2, pos_y_dev_2, pos_z_dev_2,
vel_x_dev_2, vel_y_dev_2, vel_z_dev_2,
acl_x_dev_2, acl_y_dev_2, acl_z_dev_2,
m_dev,
n_dev,
dt_dev,
epsilon_squared_dev);
hipMemcpy(pos_x, pos_x_dev_2, particle_n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(pos_y, pos_y_dev_2, particle_n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(pos_z, pos_z_dev_2, particle_n * sizeof(double), hipMemcpyDeviceToHost);
cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
}
else
{
hipLaunchKernelGGL(( N_BODY_GPU), dim3(dim3(particle_n/BDIMX)), dim3(BDIMX), 0, 0,
pos_x_dev_2, pos_y_dev_2, pos_z_dev_2,
vel_x_dev_2, vel_y_dev_2, vel_z_dev_2,
acl_x_dev_2, acl_y_dev_2, acl_z_dev_2,
pos_x_dev, pos_y_dev, pos_z_dev,
vel_x_dev, vel_y_dev, vel_z_dev,
acl_x_dev, acl_y_dev, acl_z_dev,
m_dev,
n_dev,
dt_dev,
epsilon_squared_dev);
hipMemcpy(pos_x, pos_x_dev, particle_n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(pos_y, pos_y_dev, particle_n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(pos_z, pos_z_dev, particle_n * sizeof(double), hipMemcpyDeviceToHost);
cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
}
// pos on timestep 0: 0.655429, 0.655429, 0.473758
// pos on timestep 1: -0.407697, -0.407697, -0.144373
// pos on timestep 2: -1.30937, -1.30937, -0.876057
// pos on timestep 3: -2.19476, -2.19476, -1.62789
// pos on timestep 4: -3.07345, -3.07345, -2.38426
// pos on timestep 5: -3.94812, -3.94812, -3.14275
// pos on timestep 6: -4.82021, -4.82021, -3.90248
// pos on timestep 7: -5.69054, -5.69054, -4.663
// pos on timestep 8: -6.55961, -6.55961, -5.42408
// pos on timestep 9: -7.42775, -7.42775, -6.18555
}
hipFree(pos_x_dev);
hipFree(pos_y_dev);
hipFree(pos_z_dev);
hipFree(vel_x_dev);
hipFree(vel_y_dev);
hipFree(vel_z_dev);
hipFree(acl_x_dev);
hipFree(acl_y_dev);
hipFree(acl_z_dev);
hipFree(pos_x_dev_2);
hipFree(pos_y_dev_2);
hipFree(pos_z_dev_2);
hipFree(vel_x_dev_2);
hipFree(vel_y_dev_2);
hipFree(vel_z_dev_2);
hipFree(acl_x_dev_2);
hipFree(acl_y_dev_2);
hipFree(acl_z_dev_2);
hipFree(m_dev);
hipFree(dt_dev);
hipFree(epsilon_squared_dev);
hipFree(n_dev);
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&gpu_time,start,end);
printf("\nGPU runtime: %.4f ms\n",gpu_time);
hipEventDestroy(start);
hipEventDestroy(end);
//////////////////////////////////////////////////////////////////////////
out<<"R0: "<<pos_x[particle_n/2]<<" " <<pos_y[particle_n/2]<<" " <<pos_z[particle_n/2]<<endl;
out<<"T1: "<<gpu_time<<endl;
}
int main()
{
if(name::team=="Team_X"){
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name=name::team+"_competition_2_nbody.dat";
out.open(file_name.c_str());
if(out.fail()){
printf("\ncannot open file %s to record results\n",file_name.c_str());
return 0;
}
Test_N_Body_Simulation();
return 0;
}
| 008c554dc7b40780509d4c55e60cf512ca37ff93.cu | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round 2: n-body simulation
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>
using namespace std;
#define BDIMX 32
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
std::string team="dns";
std::string author_1="Gao_Chen";
std::string author_2="Nicolas_Flores";
std::string author_3="Shikhar_Sinha";
};
//////////////////////////////////////////////////////////////////////////
////Here is a sample function implemented on CPU for n-body simulation.
__host__ void N_Body_Simulation_CPU_Poorman(double* pos_x,double* pos_y,double* pos_z, ////position array
double* vel_x,double* vel_y,double* vel_z, ////velocity array
double* acl_x,double* acl_y,double* acl_z, ////acceleration array
const double* mass, ////mass array
const int n, ////number of particles
const double dt, ////timestep
const double epsilon_squared) ////epsilon to avoid 0-denominator
{
////Step 1: set particle accelerations to be zero
memset(acl_x,0x00,sizeof(double)*n);
memset(acl_y,0x00,sizeof(double)*n);
memset(acl_z,0x00,sizeof(double)*n);
////Step 2: traverse all particle pairs and accumulate gravitational forces for each particle from pairwise interactions
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
////skip calculating force for itself
if(i==j) continue;
////r_ij=x_j-x_i
double rx=pos_x[j]-pos_x[i];
double ry=pos_y[j]-pos_y[i];
double rz=pos_z[j]-pos_z[i];
////a_ij=m_j*r_ij/(r+epsilon)^3,
////noticing that we ignore the gravitational coefficient (assuming G=1)
double dis_squared=rx*rx+ry*ry+rz*rz;
double one_over_dis_cube=1.0/pow(sqrt(dis_squared+epsilon_squared),3);
double ax=mass[j]*rx*one_over_dis_cube;
double ay=mass[j]*ry*one_over_dis_cube;
double az=mass[j]*rz*one_over_dis_cube;
////accumulate the force to the particle
acl_x[i]+=ax;
acl_y[i]+=ay;
acl_z[i]+=az;
}
}
////Step 3: explicit time integration to update the velocity and position of each particle
for(int i=0;i<n;i++){
////v_{t+1}=v_{t}+a_{t}*dt
vel_x[i]+=acl_x[i]*dt;
vel_y[i]+=acl_y[i]*dt;
vel_z[i]+=acl_z[i]*dt;
////x_{t+1}=x_{t}+v_{t}*dt
pos_x[i]+=vel_x[i]*dt;
pos_y[i]+=vel_y[i]*dt;
pos_z[i]+=vel_z[i]*dt;
}
}
//////////////////////////////////////////////////////////////////////////
////TODO 1: your GPU variables and functions start here
__global__ void N_BODY_GPU(
double* pos_x,double* pos_y,double* pos_z, ////position array
double* vel_x,double* vel_y,double* vel_z, ////velocity array
double* acl_x,double* acl_y,double* acl_z, ////acceleration array
double* pos_x_2,double* pos_y_2,double* pos_z_2, ////position array
double* vel_x_2,double* vel_y_2,double* vel_z_2, ////velocity array
double* acl_x_2,double* acl_y_2,double* acl_z_2,
const double* mass, ////mass array
const int* n, ////number of particles
const double* dt, ////timestep
const double* epsilon_squared) ////epsilon to avoid 0-denominator
{
__shared__ double x[BDIMX];
__shared__ double y[BDIMX];
__shared__ double z[BDIMX];
__shared__ double m[BDIMX];
double new_acl_x = 0;
double new_acl_y = 0;
double new_acl_z = 0;
double eps2 = epsilon_squared[0];
int bnum = blockDim.x * blockIdx.x + threadIdx.x; // the particle index
double own_pos_x = pos_x[bnum];
double own_pos_y = pos_y[bnum];
double own_pos_z = pos_z[bnum];
// need to load in values n/p times so we do:
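	// Shared-memory tiling: each pass loads BDIMX positions and masses into shared
	// memory, and every thread accumulates that tile's contribution to its own
	// particle's acceleration. The i == j self term adds nothing (r = 0), and the
	// epsilon softening keeps its denominator non-zero.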
for(int i = 0; i < n[0] / BDIMX; i++)
{
x[threadIdx.x] = pos_x[i * BDIMX + threadIdx.x];
y[threadIdx.x] = pos_y[i * BDIMX + threadIdx.x];
z[threadIdx.x] = pos_z[i * BDIMX + threadIdx.x];
m[threadIdx.x] = mass[i * BDIMX + threadIdx.x];
__syncthreads();
// do the calculation: for each of the p bodies loaded in, calculate the stuff
for(int j = 0; j < BDIMX; j+=1)
{
double r_x = x[j] - own_pos_x;
double r_y = y[j] - own_pos_y;
double r_z = z[j] - own_pos_z;
double dist_sqr = r_x * r_x + r_y * r_y + r_z * r_z + eps2;
double dist_sixth = dist_sqr * dist_sqr * dist_sqr;
double m_over_dis_cube = m[j]/sqrt(dist_sixth);
new_acl_x += r_x * m_over_dis_cube;
new_acl_y += r_y * m_over_dis_cube;
new_acl_z += r_z * m_over_dis_cube;
}
__syncthreads();
}
acl_x_2[bnum] = new_acl_x;
acl_y_2[bnum] = new_acl_y;
acl_z_2[bnum] = new_acl_z;
double new_v_x = vel_x[bnum] + new_acl_x * dt[0];
double new_v_y = vel_y[bnum] + new_acl_y * dt[0];
double new_v_z = vel_z[bnum] + new_acl_z * dt[0];
pos_x_2[bnum] = own_pos_x + new_v_x * dt[0];
pos_y_2[bnum] = own_pos_y + new_v_y * dt[0];
pos_z_2[bnum] = own_pos_z + new_v_z * dt[0];
vel_x_2[bnum] = new_v_x;
vel_y_2[bnum] = new_v_y;
vel_z_2[bnum] = new_v_z;
}
////Your implementations end here
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////Test function for n-body simulator
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Please do not change the values below
const double dt=0.001; ////time step
const int time_step_num=10; ////number of time steps
const double epsilon=1e-2; ////epsilon added in the denominator to avoid 0-division when calculating the gravitational force
const double epsilon_squared=epsilon*epsilon; ////epsilon squared
////We use grid_size=4 to help you debug your code, change it to a bigger number (e.g., 16, 32, etc.) to test the performance of your GPU code
const unsigned int grid_size=16; ////assuming particles are initialized on a background grid
const unsigned int particle_n=pow(grid_size,3); ////assuming each grid cell has one particle at the beginning
__host__ void Test_N_Body_Simulation()
{
////initialize position, velocity, acceleration, and mass
double* pos_x=new double[particle_n];
double* pos_y=new double[particle_n];
double* pos_z=new double[particle_n];
////initialize particle positions as the cell centers on a background grid
double dx=1.0/(double)grid_size;
for(unsigned int k=0;k<grid_size;k++){
for(unsigned int j=0;j<grid_size;j++){
for(unsigned int i=0;i<grid_size;i++){
unsigned int index=k*grid_size*grid_size+j*grid_size+i;
pos_x[index]=dx*(double)i;
pos_y[index]=dx*(double)j;
pos_z[index]=dx*(double)k;
}
}
}
double* vel_x=new double[particle_n];
memset(vel_x,0x00,particle_n*sizeof(double));
double* vel_y=new double[particle_n];
memset(vel_y,0x00,particle_n*sizeof(double));
double* vel_z=new double[particle_n];
memset(vel_z,0x00,particle_n*sizeof(double));
double* acl_x=new double[particle_n];
memset(acl_x,0x00,particle_n*sizeof(double));
double* acl_y=new double[particle_n];
memset(acl_y,0x00,particle_n*sizeof(double));
double* acl_z=new double[particle_n];
memset(acl_z,0x00,particle_n*sizeof(double));
double* mass=new double[particle_n];
for(int i=0;i<particle_n;i++){
mass[i]=100.0;
}
//////////////////////////////////////////////////////////////////////////
////Default implementation: n-body simulation on CPU
////Comment the CPU implementation out when you test large-scale examples
auto cpu_start=chrono::system_clock::now();
cout<<"Total number of particles: "<<particle_n<<endl;
cout<<"Tracking the motion of particle "<<particle_n/2<<endl;
// for(int i=0;i<time_step_num;i++){
// N_Body_Simulation_CPU_Poorman(pos_x,pos_y,pos_z,vel_x,vel_y,vel_z,acl_x,acl_y,acl_z,mass,particle_n,dt,epsilon_squared);
// cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
// }
auto cpu_end=chrono::system_clock::now();
chrono::duration<double> cpu_time=cpu_end-cpu_start;
cout<<"CPU runtime: "<<cpu_time.count()*1000.<<" ms."<<endl;
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////Your implementation: n-body simulator on GPU
//////////////////////////////////////////////////////////////////////////
////TODO 2: Your GPU functions are called here
////Requirement: You need to copy data from the CPU arrays, conduct computations on the GPU, and copy the values back from GPU to CPU
////The final positions should be stored in the same place as the CPU n-body function, i.e., pos_x, pos_y, pos_z
////The correctness of your simulation will be evaluated by comparing the results (positions) with the results calculated by the default CPU implementations
//////////////////////////////////////////////////////////////////////////
for(unsigned int k=0;k<grid_size;k++){
for(unsigned int j=0;j<grid_size;j++){
for(unsigned int i=0;i<grid_size;i++){
unsigned int index=k*grid_size*grid_size+j*grid_size+i;
pos_x[index]=dx*(double)i;
pos_y[index]=dx*(double)j;
pos_z[index]=dx*(double)k;
}
}
}
memset(vel_x,0x00,particle_n*sizeof(double));
memset(vel_y,0x00,particle_n*sizeof(double));
memset(vel_z,0x00,particle_n*sizeof(double));
memset(acl_x,0x00,particle_n*sizeof(double));
memset(acl_y,0x00,particle_n*sizeof(double));
memset(acl_z,0x00,particle_n*sizeof(double));
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
float gpu_time=0.0f;
cudaDeviceSynchronize();
cudaEventRecord(start);
double *pos_x_dev;
double *pos_y_dev;
double *pos_z_dev;
double *vel_x_dev;
double *vel_y_dev;
double *vel_z_dev;
double *acl_x_dev;
double *acl_y_dev;
double *acl_z_dev;
double *pos_x_dev_2;
double *pos_y_dev_2;
double *pos_z_dev_2;
double *vel_x_dev_2;
double *vel_y_dev_2;
double *vel_z_dev_2;
double *acl_x_dev_2;
double *acl_y_dev_2;
double *acl_z_dev_2;
double *m_dev;
double *dt_dev;
double *epsilon_squared_dev;
int *n_dev;
cudaMalloc((void**)&pos_x_dev, particle_n * sizeof(double));
cudaMalloc((void**)&pos_y_dev, particle_n * sizeof(double));
cudaMalloc((void**)&pos_z_dev, particle_n * sizeof(double));
cudaMalloc((void**)&vel_x_dev, particle_n * sizeof(double));
cudaMalloc((void**)&vel_y_dev, particle_n * sizeof(double));
cudaMalloc((void**)&vel_z_dev, particle_n * sizeof(double));
cudaMalloc((void**)&acl_x_dev, particle_n * sizeof(double));
cudaMalloc((void**)&acl_y_dev, particle_n * sizeof(double));
cudaMalloc((void**)&acl_z_dev, particle_n * sizeof(double));
cudaMalloc((void**)&pos_x_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&pos_y_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&pos_z_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&vel_x_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&vel_y_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&vel_z_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&acl_x_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&acl_y_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&acl_z_dev_2, particle_n * sizeof(double));
cudaMalloc((void**)&m_dev, particle_n * sizeof(double));
cudaMalloc((void**)&dt_dev, sizeof(double));
cudaMalloc((void**)&epsilon_squared_dev, sizeof(double));
cudaMalloc((void**)&n_dev, sizeof(int));
cudaMemcpy(pos_x_dev, pos_x, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pos_y_dev, pos_y, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pos_z_dev, pos_z, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(vel_x_dev, vel_x, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(vel_y_dev, vel_y, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(vel_z_dev, vel_z, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(acl_x_dev, acl_x, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(acl_y_dev, acl_y, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(acl_z_dev, acl_z, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemset(pos_x_dev_2, 0, particle_n * sizeof(double));
cudaMemset(pos_y_dev_2, 0, particle_n * sizeof(double));
cudaMemset(pos_z_dev_2, 0, particle_n * sizeof(double));
cudaMemset(vel_x_dev_2, 0, particle_n * sizeof(double));
cudaMemset(vel_y_dev_2, 0, particle_n * sizeof(double));
cudaMemset(vel_z_dev_2, 0, particle_n * sizeof(double));
cudaMemset(acl_x_dev_2, 0, particle_n * sizeof(double));
cudaMemset(acl_y_dev_2, 0, particle_n * sizeof(double));
cudaMemset(acl_z_dev_2, 0, particle_n * sizeof(double));
cudaMemcpy(m_dev, mass, particle_n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dt_dev, &dt, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(epsilon_squared_dev,&epsilon_squared, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(n_dev, &particle_n, sizeof(int), cudaMemcpyHostToDevice);
cout<<"Total number of particles: "<<particle_n<<endl;
cout<<"Tracking the motion of particle "<<particle_n/2<<endl;
for (int i = 0;i < time_step_num; i++)
{
if(i % 2 == 0)
{
N_BODY_GPU<<<dim3(particle_n/BDIMX), BDIMX>>>(
pos_x_dev, pos_y_dev, pos_z_dev,
vel_x_dev, vel_y_dev, vel_z_dev,
acl_x_dev, acl_y_dev, acl_z_dev,
pos_x_dev_2, pos_y_dev_2, pos_z_dev_2,
vel_x_dev_2, vel_y_dev_2, vel_z_dev_2,
acl_x_dev_2, acl_y_dev_2, acl_z_dev_2,
m_dev,
n_dev,
dt_dev,
epsilon_squared_dev);
cudaMemcpy(pos_x, pos_x_dev_2, particle_n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(pos_y, pos_y_dev_2, particle_n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(pos_z, pos_z_dev_2, particle_n * sizeof(double), cudaMemcpyDeviceToHost);
cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
}
else
{
N_BODY_GPU<<<dim3(particle_n/BDIMX), BDIMX>>>(
pos_x_dev_2, pos_y_dev_2, pos_z_dev_2,
vel_x_dev_2, vel_y_dev_2, vel_z_dev_2,
acl_x_dev_2, acl_y_dev_2, acl_z_dev_2,
pos_x_dev, pos_y_dev, pos_z_dev,
vel_x_dev, vel_y_dev, vel_z_dev,
acl_x_dev, acl_y_dev, acl_z_dev,
m_dev,
n_dev,
dt_dev,
epsilon_squared_dev);
cudaMemcpy(pos_x, pos_x_dev, particle_n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(pos_y, pos_y_dev, particle_n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(pos_z, pos_z_dev, particle_n * sizeof(double), cudaMemcpyDeviceToHost);
cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
}
// pos on timestep 0: 0.655429, 0.655429, 0.473758
// pos on timestep 1: -0.407697, -0.407697, -0.144373
// pos on timestep 2: -1.30937, -1.30937, -0.876057
// pos on timestep 3: -2.19476, -2.19476, -1.62789
// pos on timestep 4: -3.07345, -3.07345, -2.38426
// pos on timestep 5: -3.94812, -3.94812, -3.14275
// pos on timestep 6: -4.82021, -4.82021, -3.90248
// pos on timestep 7: -5.69054, -5.69054, -4.663
// pos on timestep 8: -6.55961, -6.55961, -5.42408
// pos on timestep 9: -7.42775, -7.42775, -6.18555
}
cudaFree(pos_x_dev);
cudaFree(pos_y_dev);
cudaFree(pos_z_dev);
cudaFree(vel_x_dev);
cudaFree(vel_y_dev);
cudaFree(vel_z_dev);
cudaFree(acl_x_dev);
cudaFree(acl_y_dev);
cudaFree(acl_z_dev);
cudaFree(pos_x_dev_2);
cudaFree(pos_y_dev_2);
cudaFree(pos_z_dev_2);
cudaFree(vel_x_dev_2);
cudaFree(vel_y_dev_2);
cudaFree(vel_z_dev_2);
cudaFree(acl_x_dev_2);
cudaFree(acl_y_dev_2);
cudaFree(acl_z_dev_2);
cudaFree(m_dev);
cudaFree(dt_dev);
cudaFree(epsilon_squared_dev);
cudaFree(n_dev);
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&gpu_time,start,end);
printf("\nGPU runtime: %.4f ms\n",gpu_time);
cudaEventDestroy(start);
cudaEventDestroy(end);
//////////////////////////////////////////////////////////////////////////
out<<"R0: "<<pos_x[particle_n/2]<<" " <<pos_y[particle_n/2]<<" " <<pos_z[particle_n/2]<<endl;
out<<"T1: "<<gpu_time<<endl;
}
int main()
{
if(name::team=="Team_X"){
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name=name::team+"_competition_2_nbody.dat";
out.open(file_name.c_str());
if(out.fail()){
printf("\ncannot open file %s to record results\n",file_name.c_str());
return 0;
}
Test_N_Body_Simulation();
return 0;
}
|
be13ae72bd4ac400460370d7c00644d38b491445.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start); \
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
#define DEBUG
unsigned char *input_gpu;
unsigned char *output_gpu;
double CLOCK() {
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return (t.tv_sec * 1000)+(t.tv_nsec*1e-6);
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
// HERE!!!
__global__ void kernel(unsigned char *input,
unsigned char *output){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
output[location] = x%255;
}
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(hipMemcpy(input_gpu,
data,
size*sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
output_gpu);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(hipMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
}
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(hipMemcpy(input_gpu,
data,
size*sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
output_gpu);
checkCuda(hipDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(hipMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
}
| be13ae72bd4ac400460370d7c00644d38b491445.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
#define DEBUG
unsigned char *input_gpu;
unsigned char *output_gpu;
double CLOCK() {
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return (t.tv_sec * 1000)+(t.tv_nsec*1e-6);
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
// HERE!!!
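// Placeholder kernel: it only writes a per-column pattern (x % 255) so the copy and
// timing pipeline around it can be exercised end to end.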
__global__ void kernel(unsigned char *input,
unsigned char *output){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
output[location] = x%255;
}
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
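	// Round the image up to a whole number of TILE_SIZE x TILE_SIZE tiles
	// (ceiling division); XSize and YSize are the padded dimensions used for allocation.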
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
kernel<<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
kernel<<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
a42f194043f81b672bad4a2fc1dd5f6d86246847.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <fstream>
#include <chrono>
#include "sgl.h"
#define SHA_Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
#define SHA_Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
/* Define the SHA shift, rotate left, and rotate right macros */
#define SHA256_SHR(bits,word) ((word) >> (bits))
#define SHA256_ROTL(bits,word) \
(((word) << (bits)) | ((word) >> (32-(bits))))
#define SHA256_ROTR(bits,word) \
(((word) >> (bits)) | ((word) << (32-(bits))))
/* Define the SHA SIGMA and sigma macros */
#define SHA256_SIGMA0(word) \
(SHA256_ROTR( 2,word) ^ SHA256_ROTR(13,word) ^ SHA256_ROTR(22,word))
#define SHA256_SIGMA1(word) \
(SHA256_ROTR( 6,word) ^ SHA256_ROTR(11,word) ^ SHA256_ROTR(25,word))
#define SHA256_sigma0(word) \
(SHA256_ROTR( 7,word) ^ SHA256_ROTR(18,word) ^ SHA256_SHR( 3,word))
#define SHA256_sigma1(word) \
(SHA256_ROTR(17,word) ^ SHA256_ROTR(19,word) ^ SHA256_SHR(10,word))
/* Local Function Prototypes */
__host__ __device__ void SHA224_256ProcessMessageBlock(SHA256Context *context);
__host__ __device__ void SHA224_256Finalize(SHA256Context *context,
uint8_t Pad_Byte);
__host__ __device__ void SHA224_256PadMessage(SHA256Context *context,
uint8_t Pad_Byte);
// How many to run in parallel.
const int IN_PARALLEL = 256;
/*
* SHA256Input
*
* Description:
* This function accepts an array of octets as the next portion
* of the message.
*
* Parameters:
* context: [in/out]
* The SHA context to update.
* message_array[ ]: [in]
* An array of octets representing the next portion of
* the message.
* length: [in]
* The length of the message in message_array.
*
* Returns:
* sha Error Code.
*/
__host__ __device__ void SHA256Input(SHA256Context *context, const uint8_t *message_array,
unsigned int length)
{
while (length--) {
context->Message_Block[context->Message_Block_Index++] =
*message_array;
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
message_array++;
}
}
/*
* SHA224_256Reset
*
* Description:
* This helper function will initialize the SHA256Context in
* preparation for computing a new SHA-224 or SHA-256 message digest.
*
* Parameters:
* context: [in/out]
* The context to reset.
* H0[ ]: [in]
* The initial hash value array to use.
*
* Returns:
* sha Error Code.
*/
__host__ __device__ void SHA256Reset(SHA256Context *context)
{
context->Length_High = context->Length_Low = 0;
context->Message_Block_Index = 0;
context->Intermediate_Hash[0] = 0x6A09E667;
context->Intermediate_Hash[1] = 0xBB67AE85;
context->Intermediate_Hash[2] = 0x3C6EF372;
context->Intermediate_Hash[3] = 0xA54FF53A;
context->Intermediate_Hash[4] = 0x510E527F;
context->Intermediate_Hash[5] = 0x9B05688C;
context->Intermediate_Hash[6] = 0x1F83D9AB;
context->Intermediate_Hash[7] = 0x5BE0CD19;
context->Computed = 0;
context->Corrupted = shaSuccess;
}
/*
* SHA224_256ProcessMessageBlock
*
* Description:
* This helper function will process the next 512 bits of the
* message stored in the Message_Block array.
*
* Parameters:
* context: [in/out]
* The SHA context to update.
*
* Returns:
* Nothing.
*
* Comments:
* Many of the variable names in this code, especially the
* single character names, were used because those were the
* names used in the Secure Hash Standard.
*/
__host__ __device__ void SHA224_256ProcessMessageBlock(SHA256Context *context)
{
/* Constants defined in FIPS 180-3, section 4.2.2 */
static const uint32_t K[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b,
0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01,
0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7,
0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152,
0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc,
0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819,
0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08,
0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f,
0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
int t, t4; /* Loop counter */
uint32_t temp1, temp2; /* Temporary word value */
uint32_t W[64]; /* Word sequence */
uint32_t A, B, C, D, E, F, G, H; /* Word buffers */
/*
* Initialize the first 16 words in the array W
*/
for (t = t4 = 0; t < 16; t++, t4 += 4)
W[t] = (((uint32_t)context->Message_Block[t4]) << 24) |
(((uint32_t)context->Message_Block[t4 + 1]) << 16) |
(((uint32_t)context->Message_Block[t4 + 2]) << 8) |
(((uint32_t)context->Message_Block[t4 + 3]));
for (t = 16; t < 64; t++)
W[t] = SHA256_sigma1(W[t-2]) + W[t-7] +
SHA256_sigma0(W[t-15]) + W[t-16];
A = context->Intermediate_Hash[0];
B = context->Intermediate_Hash[1];
C = context->Intermediate_Hash[2];
D = context->Intermediate_Hash[3];
E = context->Intermediate_Hash[4];
F = context->Intermediate_Hash[5];
G = context->Intermediate_Hash[6];
H = context->Intermediate_Hash[7];
for (t = 0; t < 64; t++) {
temp1 = H + SHA256_SIGMA1(E) + SHA_Ch(E,F,G) + K[t] + W[t];
temp2 = SHA256_SIGMA0(A) + SHA_Maj(A,B,C);
H = G;
G = F;
F = E;
E = D + temp1;
D = C;
C = B;
B = A;
A = temp1 + temp2;
}
context->Intermediate_Hash[0] += A;
context->Intermediate_Hash[1] += B;
context->Intermediate_Hash[2] += C;
context->Intermediate_Hash[3] += D;
context->Intermediate_Hash[4] += E;
context->Intermediate_Hash[5] += F;
context->Intermediate_Hash[6] += G;
context->Intermediate_Hash[7] += H;
context->Message_Block_Index = 0;
}
/*
* SHA224_256Finalize
*
* Description:
* This helper function finishes off the digest calculations.
*
* Parameters:
* context: [in/out]
* The SHA context to update.
* Pad_Byte: [in]
* The last byte to add to the message block before the 0-padding
* and length. This will contain the last bits of the message
* followed by another single bit. If the message was an
* exact multiple of 8-bits long, Pad_Byte will be 0x80.
*
* Returns:
* sha Error Code.
*/
__host__ __device__ void SHA224_256Finalize(SHA256Context *context,
uint8_t Pad_Byte)
{
int i;
SHA224_256PadMessage(context, Pad_Byte);
/* message may be sensitive, so clear it out */
for (i = 0; i < SHA256_Message_Block_Size; ++i)
context->Message_Block[i] = 0;
context->Length_High = 0; /* and clear length */
context->Length_Low = 0;
context->Computed = 1;
}
/*
* SHA224_256PadMessage
*
* Description:
* According to the standard, the message must be padded to the next
* even multiple of 512 bits. The first padding bit must be a '1'.
* The last 64 bits represent the length of the original message.
* All bits in between should be 0. This helper function will pad
* the message according to those rules by filling the
* Message_Block array accordingly. When it returns, it can be
* assumed that the message digest has been computed.
*
* Parameters:
* context: [in/out]
* The context to pad.
* Pad_Byte: [in]
* The last byte to add to the message block before the 0-padding
* and length. This will contain the last bits of the message
* followed by another single bit. If the message was an
* exact multiple of 8-bits long, Pad_Byte will be 0x80.
*
* Returns:
* Nothing.
*/
__host__ __device__ void SHA224_256PadMessage(SHA256Context *context,
uint8_t Pad_Byte)
{
/*
* Check to see if the current message block is too small to hold
* the initial padding bits and length. If so, we will pad the
* block, process it, and then continue padding into a second
* block.
*/
if (context->Message_Block_Index >= (SHA256_Message_Block_Size-8)) {
context->Message_Block[context->Message_Block_Index++] = Pad_Byte;
while (context->Message_Block_Index < SHA256_Message_Block_Size)
context->Message_Block[context->Message_Block_Index++] = 0;
SHA224_256ProcessMessageBlock(context);
} else
context->Message_Block[context->Message_Block_Index++] = Pad_Byte;
while (context->Message_Block_Index < (SHA256_Message_Block_Size-8))
context->Message_Block[context->Message_Block_Index++] = 0;
/*
* Store the message length as the last 8 octets
*/
context->Message_Block[56] = (uint8_t)(context->Length_High >> 24);
context->Message_Block[57] = (uint8_t)(context->Length_High >> 16);
context->Message_Block[58] = (uint8_t)(context->Length_High >> 8);
context->Message_Block[59] = (uint8_t)(context->Length_High);
context->Message_Block[60] = (uint8_t)(context->Length_Low >> 24);
context->Message_Block[61] = (uint8_t)(context->Length_Low >> 16);
context->Message_Block[62] = (uint8_t)(context->Length_Low >> 8);
context->Message_Block[63] = (uint8_t)(context->Length_Low);
SHA224_256ProcessMessageBlock(context);
}
/*
* SHA224_256ResultN
*
* Description:
* This helper function will return the 224-bit or 256-bit message
* digest into the Message_Digest array provided by the caller.
* NOTE:
* The first octet of hash is stored in the element with index 0,
* the last octet of hash in the element with index 27/31.
*
* Parameters:
* context: [in/out]
* The context to use to calculate the SHA hash.
* Message_Digest[ ]: [out]
* Where the digest is returned.
* HashSize: [in]
* The size of the hash, either 28 or 32.
*
* Returns:
* sha Error Code.
*/
__host__ __device__ void SHA256Result(SHA256Context *context,
uint8_t Message_Digest[SHA256HashSize])
{
int i;
SHA224_256Finalize(context, 0x80);
for (i = 0; i < SHA256HashSize; ++i)
Message_Digest[i] = (uint8_t)
(context->Intermediate_Hash[i>>2] >> 8 * ( 3 - ( i & 0x03 ) ));
}
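/*
 * hmac_combined
 *
 * HMAC-SHA256 with the hash reset/input/result steps manually inlined.
 * Functionally it matches hmac() further below; the inlining just avoids the
 * per-call overhead inside the PBKDF2 iteration loop. Note that, as written,
 * keys longer than the 64-byte block size are not pre-hashed.
 */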
__host__ __device__ void hmac_combined(
const unsigned char *message_array, int length,
const unsigned char *key, int key_len,
uint8_t digest[SHA256HashSize])
{
int i;
unsigned char k_ipad[SHA256_Message_Block_Size];
unsigned char k_opad[SHA256_Message_Block_Size];
for (i = 0; i < key_len; i++) {
k_ipad[i] = key[i] ^ 0x36;
k_opad[i] = key[i] ^ 0x5c;
}
for ( ; i < SHA256_Message_Block_Size; i++) {
k_ipad[i] = 0x36;
k_opad[i] = 0x5c;
}
SHA256Context shaContext;
SHA256Context * context = &shaContext;
// Reset
context->Length_High = context->Length_Low = 0;
context->Message_Block_Index = 0;
context->Intermediate_Hash[0] = 0x6A09E667;
context->Intermediate_Hash[1] = 0xBB67AE85;
context->Intermediate_Hash[2] = 0x3C6EF372;
context->Intermediate_Hash[3] = 0xA54FF53A;
context->Intermediate_Hash[4] = 0x510E527F;
context->Intermediate_Hash[5] = 0x9B05688C;
context->Intermediate_Hash[6] = 0x1F83D9AB;
context->Intermediate_Hash[7] = 0x5BE0CD19;
context->Computed = 0;
context->Corrupted = shaSuccess;
//SHA256Input(&shaContext, k_ipad, SHA256_Message_Block_Size);
for (i = 0; i < SHA256_Message_Block_Size; i++) {
context->Message_Block[context->Message_Block_Index++] = k_ipad[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
//SHA256Input(&shaContext, message_array, length);
for (i = 0; i < length; i++) {
context->Message_Block[context->Message_Block_Index++] = message_array[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
// Result
SHA224_256Finalize(context, 0x80);
for (i = 0; i < SHA256HashSize; ++i) {
digest[i] = (uint8_t)(context->Intermediate_Hash[i>>2] >> 8 * ( 3 - ( i & 0x03 ) ));
}
// Reset
context->Length_High = context->Length_Low = 0;
context->Message_Block_Index = 0;
context->Intermediate_Hash[0] = 0x6A09E667;
context->Intermediate_Hash[1] = 0xBB67AE85;
context->Intermediate_Hash[2] = 0x3C6EF372;
context->Intermediate_Hash[3] = 0xA54FF53A;
context->Intermediate_Hash[4] = 0x510E527F;
context->Intermediate_Hash[5] = 0x9B05688C;
context->Intermediate_Hash[6] = 0x1F83D9AB;
context->Intermediate_Hash[7] = 0x5BE0CD19;
context->Computed = 0;
context->Corrupted = shaSuccess;
//SHA256Input(&shaContext, k_opad, SHA256_Message_Block_Size);
for (i = 0; i < SHA256_Message_Block_Size; i++) {
context->Message_Block[context->Message_Block_Index++] = k_opad[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
//SHA256Input(&shaContext, digest, SHA256HashSize);
for (i = 0; i < SHA256HashSize; i++) {
context->Message_Block[context->Message_Block_Index++] = digest[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
// Result
SHA224_256Finalize(context, 0x80);
for (i = 0; i < SHA256HashSize; ++i) {
digest[i] = (uint8_t)(context->Intermediate_Hash[i>>2] >> 8 * ( 3 - ( i & 0x03 ) ));
}
}
__host__ __device__ void hmac(
const unsigned char *message_array, int length,
const unsigned char *key, int key_len,
uint8_t digest[SHA256HashSize])
{
int i;
/* inner padding - key XORd with ipad */
unsigned char k_ipad[SHA256_Message_Block_Size];
/*
* The HMAC transform looks like:
*
* SHA(K XOR opad, SHA(K XOR ipad, text))
*
* where K is an n byte key, 0-padded to a total of blocksize bytes,
* ipad is the byte 0x36 repeated blocksize times,
* opad is the byte 0x5c repeated blocksize times,
* and text is the data being protected.
*/
unsigned char k_opad[SHA256_Message_Block_Size];
/* store key into the pads, XOR'd with ipad and opad values */
for (i = 0; i < key_len; i++) {
k_ipad[i] = key[i] ^ 0x36;
k_opad[i] = key[i] ^ 0x5c;
}
/* remaining pad bytes are '\0' XOR'd with ipad and opad values */
for ( ; i < SHA256_Message_Block_Size; i++) {
k_ipad[i] = 0x36;
k_opad[i] = 0x5c;
}
SHA256Context shaContext;
/* perform inner hash */
/* init context for 1st pass */
SHA256Reset(&shaContext);
/* and start with inner pad */
SHA256Input(&shaContext, k_ipad, SHA256_Message_Block_Size);
// Run on the message array.
SHA256Input(&shaContext, message_array, length);
SHA256Result(&shaContext, digest);
/* perform outer SHA */
/* init context for 2nd pass */
SHA256Reset(&shaContext);
/* start with outer pad */
SHA256Input(&shaContext, k_opad, SHA256_Message_Block_Size);
/* then results of 1st hash */
SHA256Input(&shaContext, digest, SHA256HashSize);
/* finish up 2nd pass */
SHA256Result(&shaContext, digest);
}
void HexToBytes(const std::string& hex, unsigned char * &newsalt) {
for (unsigned int i = 0; i < hex.length(); i += 2) {
std::string byteString = hex.substr(i, 2);
char byte = (char) strtol(byteString.c_str(), NULL, 16);
newsalt[i/2] = byte;
}
}
// Compile with: nvcc sgl.cu -o build/sgl
__host__ __device__ void pbkdf2(unsigned char * password, int pwsize, unsigned char * salt, uint8_t digest[SHA256HashSize]) {
// Hashing function will be sha256. hlen will therefore be 32, same as keyLen.
// Desired key length will be 32.
// iterations will be 100000.
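    // With dkLen == hLen == 32 only the first PBKDF2 block is needed:
    //   T_1 = U_1 XOR U_2 XOR ... XOR U_c
    //   U_1 = HMAC(password, salt || INT_32_BE(1))  (the 20-byte salt passed in
    //                                                already has INT(1) appended)
    //   U_i = HMAC(password, U_{i-1})
    // The loop below accumulates the XOR in `digest` and carries U_{i-1} in `runningkey`.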
int rounds = 100000;
hmac(
salt,
20,
password,
pwsize,
digest
);
uint8_t newdigest[32];
uint8_t runningkey[32];
memcpy(runningkey, digest, 32);
/*for (int i = 0; i < 32; i++) {
runningkey[i] = newdigest[i];
}*/
for (int i = 2; i <= rounds; i++) {
//hmac(runningkey, 32, password, pwsize, newdigest);
hmac_combined(runningkey, 32, password, pwsize, newdigest);
for (int j = 0; j < 32; j++) {
digest[j] = digest[j] ^ newdigest[j];
runningkey[j] = newdigest[j];
}
}
}
void createPbkdfSalt(unsigned char* newsalt, std::string salt) {
HexToBytes(salt, newsalt);
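    // Append the PBKDF2 block index (1) as a 4-byte big-endian integer, so the
    // 20-byte buffer is salt(16 bytes) || INT_32_BE(1) -- the message fed to
    // U_1 = HMAC(password, S || INT(i)) for the first (and only) derived block.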
newsalt[16] = (1 >> 24) & 0xff;
newsalt[17] = (1 >> 16) & 0xff;
newsalt[18] = (1 >> 8) & 0xff;
newsalt[19] = (1 >> 0) & 0xff;
}
void runIteration(std::string words[18328], unsigned char * salt, unsigned char * expected) {
int rand1 = rand() % 18327;
int rand2 = rand() % 18327;
int rand3 = rand() % 18327;
std::string password = words[rand1] + " " + words[rand2] + " " + words[rand3];
uint8_t result[SHA256HashSize];
pbkdf2((unsigned char *)password.c_str(), password.size(), salt, result);
bool match = true;
for (int j = 0; j < SHA256HashSize; j++) {
if (result[j] != expected[j]) {
match = false;
break;
}
}
if (match) {
std::cout << "MATCH!!!: " << password << std::endl;
}
}
__global__
void runIterationKernel(unsigned char* passwords, int * pwsizes, unsigned char * salt, unsigned char * expected, bool matches[IN_PARALLEL]) {
uint8_t result[SHA256HashSize];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
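    // Grid-stride loop: each thread handles candidates index, index + stride, ...
    // With the launch in runInParallel() (4 blocks of IN_PARALLEL / 4 threads)
    // every thread evaluates exactly one candidate password.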
for (int i = index; i < IN_PARALLEL; i += stride) {
unsigned char * password;
password = passwords + i * 40;
pbkdf2(password, pwsizes[i], salt, result);
bool match = true;
for (int j = 0; j < SHA256HashSize; j++) {
if (result[j] != expected[j]) {
match = false;
break;
}
}
if (match) {
matches[i] = true;
}
}
}
void runInParallel() {
std::cout << "Setting up parallel run" << std::endl;
std::string words[18328];
std::string line;
std::ifstream myfile;
myfile.open ("AgileWords.txt");
if (myfile.is_open())
{
int i = 0;
while ( getline (myfile,line) )
{
words[i] = line;
i++;
}
myfile.close();
}
// ID: DOHB6DC7 -- overwritten for testing !!!!!
//std::string saltstring = "9dc661ec09c948dd16710439d157cef2";
std::string saltstring = "2db485972861e63479528bf382d1bc04";
std::string expected = "3c453512d47b37352bf2c5c1408ea4d9f46c48878782843a685c0c7e54232ba0";
//std::string expected = "4073c5e1cbd7790347b26e0447795220cd933689219b3446da294f509a583d48";
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, 0);
std::cout << properties.name << std::endl;
std::cout << "Threads per block: " << properties.maxThreadsPerBlock << std::endl;
auto started = std::chrono::high_resolution_clock::now();
std::string originals[IN_PARALLEL];
unsigned char * passwords;
int *pwsizes;
bool *matches;
unsigned char * salt;
unsigned char * expectedBytes;
    hipMallocManaged(&passwords, IN_PARALLEL * 40 * sizeof(char));  // 40 bytes per candidate password
hipMallocManaged(&pwsizes, IN_PARALLEL * sizeof(int));
hipMallocManaged(&matches, IN_PARALLEL * sizeof(bool));
hipMallocManaged(&salt, 20 * sizeof(char));
hipMallocManaged(&expectedBytes, 32 * sizeof(char));
createPbkdfSalt(salt, saltstring);
HexToBytes(expected, expectedBytes);
for (int i = 0; i < IN_PARALLEL; i++) {
if (i == 16) {
originals[i] = "glassy ubiquity absence";
} else {
int rand1 = rand() % 18327;
int rand2 = rand() % 18327;
int rand3 = rand() % 18327;
originals[i] = words[rand1] + " " + words[rand2] + " " + words[rand3];
}
pwsizes[i] = originals[i].size();
unsigned char * password = (unsigned char *)originals[i].c_str();
for (int j = 0; j < 40; j++) {
if (j < pwsizes[i]) {
passwords[i*40 + j] = password[j];
} else {
passwords[i*40 + j] = 0x00000000;
}
}
matches[i] = false;
}
unsigned char * password = passwords + 16 * 40;
for (int i = 0; i < pwsizes[16]; i++) {
std::cout << password[i];
}
std::cout << std::endl;
hipError_t error;
int numblocks = 4;
int blocksize = IN_PARALLEL / numblocks;
hipLaunchKernelGGL(( runIterationKernel), dim3(numblocks), dim3(blocksize), 0, 0, passwords, pwsizes, salt, expectedBytes, matches);
std::cout << "Running parallel" << std::endl;
hipDeviceSynchronize();
error = hipGetLastError();
std::cout << hipGetErrorName(error) << ": " << hipGetErrorString(error) << std::endl;
std::cout << "Synchronized" << std::endl;
for (int k = 0; k < IN_PARALLEL; k++) {
if (matches[k]) {
std::cout << "MATCH!!!: " << originals[k] << std::endl;
} else {
//std::cout << "Did not match: " << originals[k] << std::endl;
}
}
hipFree(passwords);
hipFree(pwsizes);
hipFree(matches);
hipFree(salt);
hipFree(expectedBytes);
auto done = std::chrono::high_resolution_clock::now();
double totalTime = std::chrono::duration_cast<std::chrono::milliseconds>(done-started).count();
totalTime = totalTime / 1000;
std::cout << "Total time taken: " << std::fixed << totalTime << "s" << std::endl;
// Number of combinations = 6,156,660,800,000
// if 5/sec is 39,000 years
    // if 1000 takes 1min30sec then 17,570 years
// if 10000 takes 5min then 5856 years
// And now with parallel cuda
// about 1500/sec? = 130 years...
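    // (~18,328^3 three-word combinations: 6,156,660,800,000 / 1,500 per second
    //  is about 4.1e9 seconds, and 4.1e9 / 31.6e6 seconds-per-year is ~130 years.)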
}
void loadWords() {
std::string words[18328];
std::string line;
std::ifstream myfile;
myfile.open ("AgileWords.txt");
if (myfile.is_open())
{
int i = 0;
while ( getline (myfile,line) )
{
words[i] = line;
i++;
}
myfile.close();
}
std::cout << "Words loaded" <<std::endl;
// ID: DOHB6DC7
std::string saltstring = "9dc661ec09c948dd16710439d157cef2";
unsigned char * salt = (unsigned char *)malloc(20);
createPbkdfSalt(salt, saltstring);
std::string expected = "4073c5e1cbd7790347b26e0447795220cd933689219b3446da294f509a583d48";
unsigned char * expectedBytes = (unsigned char *)malloc(32);
HexToBytes(expected, expectedBytes);
int attempts = 10;
std::cout << "About to start loop" <<std::endl;
auto started = std::chrono::high_resolution_clock::now();
for (int i = 0; i < attempts; i++) {
runIteration(words, salt, expectedBytes);
}
auto done = std::chrono::high_resolution_clock::now();
std::cout << "Loop done" <<std::endl;
double totalTime = std::chrono::duration_cast<std::chrono::milliseconds>(done-started).count();
totalTime = totalTime / 1000;
std::cout << "Total time taken: " << std::fixed << totalTime << "s" << std::endl;
}
__global__
void increase(int n, int *x, bool *b)
{
for (int i = 0; i < n; i++) {
if (b[i]) {
x[i] = x[i] + 20;
}
}
}
void testCuda() {
int N = 5;
int *x;
bool *b;
hipMallocManaged(&x, N*sizeof(int));
hipMallocManaged(&b, N*sizeof(bool));
for (int i = 0; i < N; i++) {
x[i] = i;
if (i % 3 == 0) {
b[i] = false;
} else {
b[i] = true;
}
}
hipLaunchKernelGGL(( increase), dim3(1),dim3(1), 0, 0, N, x, b);
hipDeviceSynchronize();
for (int i = 0; i < N; i++) {
std::cout << std::dec << x[i] << std::endl;
}
hipFree(x);
hipFree(b);
}
int main(void)
{
std::cout << "Running" << std::endl;
std::string testpw = "glassy ubiquity absence";
std::string testsalt = "2db485972861e63479528bf382d1bc04";
std::string testhash = "3c453512d47b37352bf2c5c1408ea4d9f46c48878782843a685c0c7e54232ba0";
unsigned char * newsalt = (unsigned char *)malloc(20);
createPbkdfSalt(newsalt, testsalt);
uint8_t prk[SHA256HashSize];
hmac(
newsalt,
16,
(unsigned char *)testpw.c_str(),
testpw.size(),
prk
);
for (int i = 0; i < SHA256HashSize; i++) {
std::cout << std::setw(2) << std::setfill('0') << std::hex << static_cast<int>(prk[i]);
}
std::cout << std::endl;
std::cout << "hmac Done" << std::endl;
uint8_t pdprk[SHA256HashSize];
pbkdf2((unsigned char *)testpw.c_str(), testpw.size(), newsalt, pdprk);
for (int i = 0; i < SHA256HashSize; i++) {
//printf("%x", prk[i]);
std::cout << std::setw(2) << std::setfill('0') << std::hex << static_cast<int>(pdprk[i]);
}
std::cout << std::endl;
unsigned char * expectedBytes = (unsigned char *)malloc(32);
HexToBytes(testhash, expectedBytes);
bool match = true;
for (int j = 0; j < SHA256HashSize; j++) {
if (pdprk[j] != expectedBytes[j]) {
match = false;
break;
}
}
if (match) {
std::cout << "pbkdf2 Test hash matched" << std::endl;
}
std::cout << "pbkdf2 Done" << std::endl;
// The cracking..
//loadWords();
testCuda();
runInParallel();
return 0;
}
| a42f194043f81b672bad4a2fc1dd5f6d86246847.cu | #include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <fstream>
#include <chrono>
#include "sgl.h"
#define SHA_Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
#define SHA_Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
/* Define the SHA shift, rotate left, and rotate right macros */
#define SHA256_SHR(bits,word) ((word) >> (bits))
#define SHA256_ROTL(bits,word) \
(((word) << (bits)) | ((word) >> (32-(bits))))
#define SHA256_ROTR(bits,word) \
(((word) >> (bits)) | ((word) << (32-(bits))))
/* Define the SHA SIGMA and sigma macros */
#define SHA256_SIGMA0(word) \
(SHA256_ROTR( 2,word) ^ SHA256_ROTR(13,word) ^ SHA256_ROTR(22,word))
#define SHA256_SIGMA1(word) \
(SHA256_ROTR( 6,word) ^ SHA256_ROTR(11,word) ^ SHA256_ROTR(25,word))
#define SHA256_sigma0(word) \
(SHA256_ROTR( 7,word) ^ SHA256_ROTR(18,word) ^ SHA256_SHR( 3,word))
#define SHA256_sigma1(word) \
(SHA256_ROTR(17,word) ^ SHA256_ROTR(19,word) ^ SHA256_SHR(10,word))
/* Local Function Prototypes */
__host__ __device__ void SHA224_256ProcessMessageBlock(SHA256Context *context);
__host__ __device__ void SHA224_256Finalize(SHA256Context *context,
uint8_t Pad_Byte);
__host__ __device__ void SHA224_256PadMessage(SHA256Context *context,
uint8_t Pad_Byte);
// How many to run in parallel.
const int IN_PARALLEL = 256;
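// 256 candidate passwords are hashed per kernel launch; runInParallel()
// launches 4 blocks of IN_PARALLEL / 4 = 64 threads, one candidate per thread.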
/*
* SHA256Input
*
* Description:
* This function accepts an array of octets as the next portion
* of the message.
*
* Parameters:
* context: [in/out]
* The SHA context to update.
* message_array[ ]: [in]
* An array of octets representing the next portion of
* the message.
* length: [in]
* The length of the message in message_array.
*
* Returns:
 *   Nothing; if the input is too long, shaInputTooLong is recorded in
 *   context->Corrupted.
*/
__host__ __device__ void SHA256Input(SHA256Context *context, const uint8_t *message_array,
unsigned int length)
{
while (length--) {
context->Message_Block[context->Message_Block_Index++] =
*message_array;
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
message_array++;
}
}
/*
 * SHA256Reset
 *
 * Description:
 *   This function will initialize the SHA256Context in preparation
 *   for computing a new SHA-256 message digest.
 *
 * Parameters:
 *   context: [in/out]
 *     The context to reset.
 *
 * Returns:
 *   Nothing.
*/
__host__ __device__ void SHA256Reset(SHA256Context *context)
{
context->Length_High = context->Length_Low = 0;
context->Message_Block_Index = 0;
context->Intermediate_Hash[0] = 0x6A09E667;
context->Intermediate_Hash[1] = 0xBB67AE85;
context->Intermediate_Hash[2] = 0x3C6EF372;
context->Intermediate_Hash[3] = 0xA54FF53A;
context->Intermediate_Hash[4] = 0x510E527F;
context->Intermediate_Hash[5] = 0x9B05688C;
context->Intermediate_Hash[6] = 0x1F83D9AB;
context->Intermediate_Hash[7] = 0x5BE0CD19;
context->Computed = 0;
context->Corrupted = shaSuccess;
}
/*
* SHA224_256ProcessMessageBlock
*
* Description:
* This helper function will process the next 512 bits of the
* message stored in the Message_Block array.
*
* Parameters:
* context: [in/out]
* The SHA context to update.
*
* Returns:
* Nothing.
*
* Comments:
* Many of the variable names in this code, especially the
* single character names, were used because those were the
* names used in the Secure Hash Standard.
*/
__host__ __device__ void SHA224_256ProcessMessageBlock(SHA256Context *context)
{
/* Constants defined in FIPS 180-3, section 4.2.2 */
static const uint32_t K[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b,
0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01,
0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7,
0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152,
0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc,
0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819,
0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08,
0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f,
0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
int t, t4; /* Loop counter */
uint32_t temp1, temp2; /* Temporary word value */
uint32_t W[64]; /* Word sequence */
uint32_t A, B, C, D, E, F, G, H; /* Word buffers */
/*
* Initialize the first 16 words in the array W
*/
for (t = t4 = 0; t < 16; t++, t4 += 4)
W[t] = (((uint32_t)context->Message_Block[t4]) << 24) |
(((uint32_t)context->Message_Block[t4 + 1]) << 16) |
(((uint32_t)context->Message_Block[t4 + 2]) << 8) |
(((uint32_t)context->Message_Block[t4 + 3]));
for (t = 16; t < 64; t++)
W[t] = SHA256_sigma1(W[t-2]) + W[t-7] +
SHA256_sigma0(W[t-15]) + W[t-16];
A = context->Intermediate_Hash[0];
B = context->Intermediate_Hash[1];
C = context->Intermediate_Hash[2];
D = context->Intermediate_Hash[3];
E = context->Intermediate_Hash[4];
F = context->Intermediate_Hash[5];
G = context->Intermediate_Hash[6];
H = context->Intermediate_Hash[7];
for (t = 0; t < 64; t++) {
temp1 = H + SHA256_SIGMA1(E) + SHA_Ch(E,F,G) + K[t] + W[t];
temp2 = SHA256_SIGMA0(A) + SHA_Maj(A,B,C);
H = G;
G = F;
F = E;
E = D + temp1;
D = C;
C = B;
B = A;
A = temp1 + temp2;
}
context->Intermediate_Hash[0] += A;
context->Intermediate_Hash[1] += B;
context->Intermediate_Hash[2] += C;
context->Intermediate_Hash[3] += D;
context->Intermediate_Hash[4] += E;
context->Intermediate_Hash[5] += F;
context->Intermediate_Hash[6] += G;
context->Intermediate_Hash[7] += H;
context->Message_Block_Index = 0;
}
/*
* SHA224_256Finalize
*
* Description:
* This helper function finishes off the digest calculations.
*
* Parameters:
* context: [in/out]
* The SHA context to update.
* Pad_Byte: [in]
* The last byte to add to the message block before the 0-padding
* and length. This will contain the last bits of the message
* followed by another single bit. If the message was an
* exact multiple of 8-bits long, Pad_Byte will be 0x80.
*
* Returns:
* sha Error Code.
*/
__host__ __device__ void SHA224_256Finalize(SHA256Context *context,
uint8_t Pad_Byte)
{
int i;
SHA224_256PadMessage(context, Pad_Byte);
/* message may be sensitive, so clear it out */
for (i = 0; i < SHA256_Message_Block_Size; ++i)
context->Message_Block[i] = 0;
context->Length_High = 0; /* and clear length */
context->Length_Low = 0;
context->Computed = 1;
}
/*
* SHA224_256PadMessage
*
* Description:
* According to the standard, the message must be padded to the next
* even multiple of 512 bits. The first padding bit must be a '1'.
* The last 64 bits represent the length of the original message.
* All bits in between should be 0. This helper function will pad
* the message according to those rules by filling the
* Message_Block array accordingly. When it returns, it can be
* assumed that the message digest has been computed.
*
* Parameters:
* context: [in/out]
* The context to pad.
* Pad_Byte: [in]
* The last byte to add to the message block before the 0-padding
* and length. This will contain the last bits of the message
* followed by another single bit. If the message was an
* exact multiple of 8-bits long, Pad_Byte will be 0x80.
*
* Returns:
* Nothing.
*/
__host__ __device__ void SHA224_256PadMessage(SHA256Context *context,
uint8_t Pad_Byte)
{
/*
* Check to see if the current message block is too small to hold
* the initial padding bits and length. If so, we will pad the
* block, process it, and then continue padding into a second
* block.
*/
if (context->Message_Block_Index >= (SHA256_Message_Block_Size-8)) {
context->Message_Block[context->Message_Block_Index++] = Pad_Byte;
while (context->Message_Block_Index < SHA256_Message_Block_Size)
context->Message_Block[context->Message_Block_Index++] = 0;
SHA224_256ProcessMessageBlock(context);
} else
context->Message_Block[context->Message_Block_Index++] = Pad_Byte;
while (context->Message_Block_Index < (SHA256_Message_Block_Size-8))
context->Message_Block[context->Message_Block_Index++] = 0;
/*
* Store the message length as the last 8 octets
*/
context->Message_Block[56] = (uint8_t)(context->Length_High >> 24);
context->Message_Block[57] = (uint8_t)(context->Length_High >> 16);
context->Message_Block[58] = (uint8_t)(context->Length_High >> 8);
context->Message_Block[59] = (uint8_t)(context->Length_High);
context->Message_Block[60] = (uint8_t)(context->Length_Low >> 24);
context->Message_Block[61] = (uint8_t)(context->Length_Low >> 16);
context->Message_Block[62] = (uint8_t)(context->Length_Low >> 8);
context->Message_Block[63] = (uint8_t)(context->Length_Low);
SHA224_256ProcessMessageBlock(context);
}
/*
 * SHA256Result
 *
 * Description:
 *   This function will return the 256-bit message digest into the
 *   Message_Digest array provided by the caller.
 *   NOTE:
 *    The first octet of hash is stored in the element with index 0,
 *    the last octet of hash in the element with index 31.
 *
 * Parameters:
 *   context: [in/out]
 *     The context to use to calculate the SHA hash.
 *   Message_Digest[ ]: [out]
 *     Where the digest is returned.
 *
 * Returns:
 *   Nothing.
*/
__host__ __device__ void SHA256Result(SHA256Context *context,
uint8_t Message_Digest[SHA256HashSize])
{
int i;
SHA224_256Finalize(context, 0x80);
for (i = 0; i < SHA256HashSize; ++i)
Message_Digest[i] = (uint8_t)
(context->Intermediate_Hash[i>>2] >> 8 * ( 3 - ( i & 0x03 ) ));
}
__host__ __device__ void hmac_combined(
const unsigned char *message_array, int length,
const unsigned char *key, int key_len,
uint8_t digest[SHA256HashSize])
{
int i;
unsigned char k_ipad[SHA256_Message_Block_Size];
unsigned char k_opad[SHA256_Message_Block_Size];
for (i = 0; i < key_len; i++) {
k_ipad[i] = key[i] ^ 0x36;
k_opad[i] = key[i] ^ 0x5c;
}
for ( ; i < SHA256_Message_Block_Size; i++) {
k_ipad[i] = 0x36;
k_opad[i] = 0x5c;
}
SHA256Context shaContext;
SHA256Context * context = &shaContext;
// Reset
context->Length_High = context->Length_Low = 0;
context->Message_Block_Index = 0;
context->Intermediate_Hash[0] = 0x6A09E667;
context->Intermediate_Hash[1] = 0xBB67AE85;
context->Intermediate_Hash[2] = 0x3C6EF372;
context->Intermediate_Hash[3] = 0xA54FF53A;
context->Intermediate_Hash[4] = 0x510E527F;
context->Intermediate_Hash[5] = 0x9B05688C;
context->Intermediate_Hash[6] = 0x1F83D9AB;
context->Intermediate_Hash[7] = 0x5BE0CD19;
context->Computed = 0;
context->Corrupted = shaSuccess;
//SHA256Input(&shaContext, k_ipad, SHA256_Message_Block_Size);
for (i = 0; i < SHA256_Message_Block_Size; i++) {
context->Message_Block[context->Message_Block_Index++] = k_ipad[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
//SHA256Input(&shaContext, message_array, length);
for (i = 0; i < length; i++) {
context->Message_Block[context->Message_Block_Index++] = message_array[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
// Result
SHA224_256Finalize(context, 0x80);
for (i = 0; i < SHA256HashSize; ++i) {
digest[i] = (uint8_t)(context->Intermediate_Hash[i>>2] >> 8 * ( 3 - ( i & 0x03 ) ));
}
// Reset
context->Length_High = context->Length_Low = 0;
context->Message_Block_Index = 0;
context->Intermediate_Hash[0] = 0x6A09E667;
context->Intermediate_Hash[1] = 0xBB67AE85;
context->Intermediate_Hash[2] = 0x3C6EF372;
context->Intermediate_Hash[3] = 0xA54FF53A;
context->Intermediate_Hash[4] = 0x510E527F;
context->Intermediate_Hash[5] = 0x9B05688C;
context->Intermediate_Hash[6] = 0x1F83D9AB;
context->Intermediate_Hash[7] = 0x5BE0CD19;
context->Computed = 0;
context->Corrupted = shaSuccess;
//SHA256Input(&shaContext, k_opad, SHA256_Message_Block_Size);
for (i = 0; i < SHA256_Message_Block_Size; i++) {
context->Message_Block[context->Message_Block_Index++] = k_opad[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
//SHA256Input(&shaContext, digest, SHA256HashSize);
for (i = 0; i < SHA256HashSize; i++) {
context->Message_Block[context->Message_Block_Index++] = digest[i];
uint32_t addTemp = context->Length_Low;
if (((context->Length_Low += 8) < addTemp) && (++context->Length_High == 0)) {
context->Corrupted = shaInputTooLong;
}
if ((context->Corrupted == shaSuccess) &&
(context->Message_Block_Index == SHA256_Message_Block_Size))
SHA224_256ProcessMessageBlock(context);
}
// Result
SHA224_256Finalize(context, 0x80);
for (i = 0; i < SHA256HashSize; ++i) {
digest[i] = (uint8_t)(context->Intermediate_Hash[i>>2] >> 8 * ( 3 - ( i & 0x03 ) ));
}
}
__host__ __device__ void hmac(
const unsigned char *message_array, int length,
const unsigned char *key, int key_len,
uint8_t digest[SHA256HashSize])
{
int i;
/* inner padding - key XORd with ipad */
unsigned char k_ipad[SHA256_Message_Block_Size];
/*
* The HMAC transform looks like:
*
* SHA(K XOR opad, SHA(K XOR ipad, text))
*
* where K is an n byte key, 0-padded to a total of blocksize bytes,
* ipad is the byte 0x36 repeated blocksize times,
* opad is the byte 0x5c repeated blocksize times,
* and text is the data being protected.
*/
unsigned char k_opad[SHA256_Message_Block_Size];
/* store key into the pads, XOR'd with ipad and opad values */
for (i = 0; i < key_len; i++) {
k_ipad[i] = key[i] ^ 0x36;
k_opad[i] = key[i] ^ 0x5c;
}
/* remaining pad bytes are '\0' XOR'd with ipad and opad values */
for ( ; i < SHA256_Message_Block_Size; i++) {
k_ipad[i] = 0x36;
k_opad[i] = 0x5c;
}
SHA256Context shaContext;
/* perform inner hash */
/* init context for 1st pass */
SHA256Reset(&shaContext);
/* and start with inner pad */
SHA256Input(&shaContext, k_ipad, SHA256_Message_Block_Size);
// Run on the message array.
SHA256Input(&shaContext, message_array, length);
SHA256Result(&shaContext, digest);
/* perform outer SHA */
/* init context for 2nd pass */
SHA256Reset(&shaContext);
/* start with outer pad */
SHA256Input(&shaContext, k_opad, SHA256_Message_Block_Size);
/* then results of 1st hash */
SHA256Input(&shaContext, digest, SHA256HashSize);
/* finish up 2nd pass */
SHA256Result(&shaContext, digest);
}
void HexToBytes(const std::string& hex, unsigned char * &newsalt) {
for (unsigned int i = 0; i < hex.length(); i += 2) {
std::string byteString = hex.substr(i, 2);
char byte = (char) strtol(byteString.c_str(), NULL, 16);
newsalt[i/2] = byte;
}
}
// Compile with: nvcc sgl.cu -o build/sgl
__host__ __device__ void pbkdf2(unsigned char * password, int pwsize, unsigned char * salt, uint8_t digest[SHA256HashSize]) {
// Hashing function will be sha256. hlen will therefore be 32, same as keyLen.
// Desired key length will be 32.
// iterations will be 100000.
int rounds = 100000;
hmac(
salt,
20,
password,
pwsize,
digest
);
uint8_t newdigest[32];
uint8_t runningkey[32];
memcpy(runningkey, digest, 32);
/*for (int i = 0; i < 32; i++) {
runningkey[i] = newdigest[i];
}*/
for (int i = 2; i <= rounds; i++) {
//hmac(runningkey, 32, password, pwsize, newdigest);
hmac_combined(runningkey, 32, password, pwsize, newdigest);
for (int j = 0; j < 32; j++) {
digest[j] = digest[j] ^ newdigest[j];
runningkey[j] = newdigest[j];
}
}
}
void createPbkdfSalt(unsigned char* newsalt, std::string salt) {
HexToBytes(salt, newsalt);
newsalt[16] = (1 >> 24) & 0xff;
newsalt[17] = (1 >> 16) & 0xff;
newsalt[18] = (1 >> 8) & 0xff;
newsalt[19] = (1 >> 0) & 0xff;
}
void runIteration(std::string words[18328], unsigned char * salt, unsigned char * expected) {
int rand1 = rand() % 18327;
int rand2 = rand() % 18327;
int rand3 = rand() % 18327;
std::string password = words[rand1] + " " + words[rand2] + " " + words[rand3];
uint8_t result[SHA256HashSize];
pbkdf2((unsigned char *)password.c_str(), password.size(), salt, result);
bool match = true;
for (int j = 0; j < SHA256HashSize; j++) {
if (result[j] != expected[j]) {
match = false;
break;
}
}
if (match) {
std::cout << "MATCH!!!: " << password << std::endl;
}
}
__global__
void runIterationKernel(unsigned char* passwords, int * pwsizes, unsigned char * salt, unsigned char * expected, bool matches[IN_PARALLEL]) {
uint8_t result[SHA256HashSize];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < IN_PARALLEL; i += stride) {
unsigned char * password;
password = passwords + i * 40;
pbkdf2(password, pwsizes[i], salt, result);
bool match = true;
for (int j = 0; j < SHA256HashSize; j++) {
if (result[j] != expected[j]) {
match = false;
break;
}
}
if (match) {
matches[i] = true;
}
}
}
void runInParallel() {
std::cout << "Setting up parallel run" << std::endl;
std::string words[18328];
std::string line;
std::ifstream myfile;
myfile.open ("AgileWords.txt");
if (myfile.is_open())
{
int i = 0;
while ( getline (myfile,line) )
{
words[i] = line;
i++;
}
myfile.close();
}
// ID: DOHB6DC7 -- overwritten for testing !!!!!
//std::string saltstring = "9dc661ec09c948dd16710439d157cef2";
std::string saltstring = "2db485972861e63479528bf382d1bc04";
std::string expected = "3c453512d47b37352bf2c5c1408ea4d9f46c48878782843a685c0c7e54232ba0";
//std::string expected = "4073c5e1cbd7790347b26e0447795220cd933689219b3446da294f509a583d48";
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
std::cout << properties.name << std::endl;
std::cout << "Threads per block: " << properties.maxThreadsPerBlock << std::endl;
auto started = std::chrono::high_resolution_clock::now();
std::string originals[IN_PARALLEL];
unsigned char * passwords;
int *pwsizes;
bool *matches;
unsigned char * salt;
unsigned char * expectedBytes;
    cudaMallocManaged(&passwords, IN_PARALLEL * 40 * sizeof(char));  // 40 bytes per candidate password
cudaMallocManaged(&pwsizes, IN_PARALLEL * sizeof(int));
cudaMallocManaged(&matches, IN_PARALLEL * sizeof(bool));
cudaMallocManaged(&salt, 20 * sizeof(char));
cudaMallocManaged(&expectedBytes, 32 * sizeof(char));
createPbkdfSalt(salt, saltstring);
HexToBytes(expected, expectedBytes);
for (int i = 0; i < IN_PARALLEL; i++) {
if (i == 16) {
originals[i] = "glassy ubiquity absence";
} else {
int rand1 = rand() % 18327;
int rand2 = rand() % 18327;
int rand3 = rand() % 18327;
originals[i] = words[rand1] + " " + words[rand2] + " " + words[rand3];
}
pwsizes[i] = originals[i].size();
unsigned char * password = (unsigned char *)originals[i].c_str();
for (int j = 0; j < 40; j++) {
if (j < pwsizes[i]) {
passwords[i*40 + j] = password[j];
} else {
passwords[i*40 + j] = 0x00000000;
}
}
matches[i] = false;
}
unsigned char * password = passwords + 16 * 40;
for (int i = 0; i < pwsizes[16]; i++) {
std::cout << password[i];
}
std::cout << std::endl;
cudaError_t error;
int numblocks = 4;
int blocksize = IN_PARALLEL / numblocks;
runIterationKernel<<<numblocks, blocksize>>>(passwords, pwsizes, salt, expectedBytes, matches);
std::cout << "Running parallel" << std::endl;
cudaDeviceSynchronize();
error = cudaGetLastError();
std::cout << cudaGetErrorName(error) << ": " << cudaGetErrorString(error) << std::endl;
std::cout << "Synchronized" << std::endl;
for (int k = 0; k < IN_PARALLEL; k++) {
if (matches[k]) {
std::cout << "MATCH!!!: " << originals[k] << std::endl;
} else {
//std::cout << "Did not match: " << originals[k] << std::endl;
}
}
cudaFree(passwords);
cudaFree(pwsizes);
cudaFree(matches);
cudaFree(salt);
cudaFree(expectedBytes);
auto done = std::chrono::high_resolution_clock::now();
double totalTime = std::chrono::duration_cast<std::chrono::milliseconds>(done-started).count();
totalTime = totalTime / 1000;
std::cout << "Total time taken: " << std::fixed << totalTime << "s" << std::endl;
// Number of combinations = 6,156,660,800,000
// if 5/sec is 39,000 years
    // if 1000 takes 1min30sec then 17,570 years
// if 10000 takes 5min then 5856 years
// And now with parallel cuda
// about 1500/sec? = 130 years...
}
void loadWords() {
std::string words[18328];
std::string line;
std::ifstream myfile;
myfile.open ("AgileWords.txt");
if (myfile.is_open())
{
int i = 0;
while ( getline (myfile,line) )
{
words[i] = line;
i++;
}
myfile.close();
}
std::cout << "Words loaded" <<std::endl;
// ID: DOHB6DC7
std::string saltstring = "9dc661ec09c948dd16710439d157cef2";
unsigned char * salt = (unsigned char *)malloc(20);
createPbkdfSalt(salt, saltstring);
std::string expected = "4073c5e1cbd7790347b26e0447795220cd933689219b3446da294f509a583d48";
unsigned char * expectedBytes = (unsigned char *)malloc(32);
HexToBytes(expected, expectedBytes);
int attempts = 10;
std::cout << "About to start loop" <<std::endl;
auto started = std::chrono::high_resolution_clock::now();
for (int i = 0; i < attempts; i++) {
runIteration(words, salt, expectedBytes);
}
auto done = std::chrono::high_resolution_clock::now();
std::cout << "Loop done" <<std::endl;
double totalTime = std::chrono::duration_cast<std::chrono::milliseconds>(done-started).count();
totalTime = totalTime / 1000;
std::cout << "Total time taken: " << std::fixed << totalTime << "s" << std::endl;
}
__global__
void increase(int n, int *x, bool *b)
{
for (int i = 0; i < n; i++) {
if (b[i]) {
x[i] = x[i] + 20;
}
}
}
void testCuda() {
int N = 5;
int *x;
bool *b;
cudaMallocManaged(&x, N*sizeof(int));
cudaMallocManaged(&b, N*sizeof(bool));
for (int i = 0; i < N; i++) {
x[i] = i;
if (i % 3 == 0) {
b[i] = false;
} else {
b[i] = true;
}
}
increase<<<1,1>>>(N, x, b);
cudaDeviceSynchronize();
for (int i = 0; i < N; i++) {
std::cout << std::dec << x[i] << std::endl;
}
cudaFree(x);
cudaFree(b);
}
int main(void)
{
std::cout << "Running" << std::endl;
std::string testpw = "glassy ubiquity absence";
std::string testsalt = "2db485972861e63479528bf382d1bc04";
std::string testhash = "3c453512d47b37352bf2c5c1408ea4d9f46c48878782843a685c0c7e54232ba0";
unsigned char * newsalt = (unsigned char *)malloc(20);
createPbkdfSalt(newsalt, testsalt);
uint8_t prk[SHA256HashSize];
hmac(
newsalt,
16,
(unsigned char *)testpw.c_str(),
testpw.size(),
prk
);
for (int i = 0; i < SHA256HashSize; i++) {
std::cout << std::setw(2) << std::setfill('0') << std::hex << static_cast<int>(prk[i]);
}
std::cout << std::endl;
std::cout << "hmac Done" << std::endl;
uint8_t pdprk[SHA256HashSize];
pbkdf2((unsigned char *)testpw.c_str(), testpw.size(), newsalt, pdprk);
for (int i = 0; i < SHA256HashSize; i++) {
//printf("%x", prk[i]);
std::cout << std::setw(2) << std::setfill('0') << std::hex << static_cast<int>(pdprk[i]);
}
std::cout << std::endl;
unsigned char * expectedBytes = (unsigned char *)malloc(32);
HexToBytes(testhash, expectedBytes);
bool match = true;
for (int j = 0; j < SHA256HashSize; j++) {
if (pdprk[j] != expectedBytes[j]) {
match = false;
break;
}
}
if (match) {
std::cout << "pbkdf2 Test hash matched" << std::endl;
}
std::cout << "pbkdf2 Done" << std::endl;
// The cracking..
//loadWords();
testCuda();
runInParallel();
return 0;
}
|
1b1e28e93c6b39fee81df555bf1013c687cdfd3b.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#define BLOCK_SIZE 16
#define CWIDTH 16
#define CHEIGHT 16
#define FWIDTH 3
#define FHEIGHT 3
#define CNUMBER 16
#define COUT 16
__shared__ float filteroutshared[CHEIGHT][CNUMBER][CNUMBER];
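// Shared staging buffer for the per-channel (depthwise) 3x3 results, indexed as
// [row][column][channel] (with CHEIGHT == BLOCK_SIZE each thread row owns one
// image row).  The pointwise stage at the end of the kernel reads it across
// channels; together this looks like a depthwise-separable convolution, where F
// holds the per-channel 3x3 taps and FP the COUT x CNUMBER pointwise weights.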
__global__ void convolute_kernel(float *IC, float*F, float*FP, float *SOC) {
int idx = threadIdx.x;
int idy = threadIdx.y;
//printf("hello\n");
int kCenterX = FWIDTH / 2;
int kCenterY = FHEIGHT / 2;
int filtersperthread = CNUMBER / BLOCK_SIZE;
int remaining = CNUMBER % BLOCK_SIZE;
float filtertemp[FHEIGHT * FWIDTH];
    float outputtemp[(CHEIGHT / BLOCK_SIZE) * CWIDTH] = {0.0f};  // zero-initialized: accumulated with += below
//printf("fil per t %d",filtersperthread);
//register float filtertemp[FHEIGHT * FWIDTH];
//register float outputtemp[CHEIGHT / BLOCK_SIZE * CWIDTH];
//printf("hello");
int chunk_size = CHEIGHT / BLOCK_SIZE;
int ilimits = idy * chunk_size;
int ioffset;
int joffset;
int myoffset;
int kchanneloffset;
int moffset;
int i_tmp;
int mm,nn;
int test;
int iioffset;
float sepFilter[CNUMBER];
for(int i = 0 ; i < CNUMBER; i++)
sepFilter[i] = FP[idx * CNUMBER + i];
for(int k = 0; k < filtersperthread; k++)
{
kchanneloffset = k * BLOCK_SIZE;
for(int i = 0; i < FHEIGHT; i++)
{
moffset = i * FWIDTH;
ioffset = i * CNUMBER * FWIDTH;
myoffset = kchanneloffset + ioffset + idx;
//ioffset = (k * FHEIGHT * FWIDTH * BLOCK_SIZE) + (idx * FHEIGHT * FWIDTH)
for(int j = 0; j < FWIDTH; j++)
{
joffset = j * CNUMBER;
filtertemp[moffset + j] = F[myoffset + joffset];
//printf("%.1f %.1f\n",filtertemp[k][i][j],F[k * FHEIGHT * FWIDTH * BLOCK_SIZE + idx%BLOCK_SIZE * FHEIGHT * FWIDTH + i * FWIDTH + j]);
}
}
for(int i = ilimits - kCenterY; i < ilimits + chunk_size + kCenterY; i++)
{
if(i >= 0 && i < CHEIGHT)
{
ioffset = i * CNUMBER * CWIDTH;
myoffset = kchanneloffset + ioffset + idx;
for(int j = 0; j < CWIDTH; j++)
{
joffset = j * CNUMBER;
                float input_element = IC[myoffset + joffset];  // IC holds floats; keep full precision
for (int m = 0; m < FHEIGHT; ++m) // kernel rows
{
mm = FHEIGHT - 1 - m;
int ii = (i - kCenterY) + mm;
iioffset = ((ii - ilimits) * CWIDTH);
moffset = m * FWIDTH;
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
nn = FWIDTH - 1 - n;
int jj = (j - kCenterX) + nn;
if ((jj >= 0 && jj < CWIDTH) && (ii >= ilimits && ii < (ilimits + chunk_size)))
{
//printf("%.1f\n", filtertemp[k][m][n] * IC[idx * CWIDTH * CWIDTH + i * CWIDTH + j]);
outputtemp[iioffset + jj] += filtertemp[moffset + n] * input_element;
}
//__syncthreads();
}
}
}
}
}
/*
for(int i = ilimits; i < ilimits + chunk_size; i++)
{
ioffset = i * CNUMBER * CWIDTH;
iioffset = ((i - ilimits) * CWIDTH);
myoffset = kchanneloffset + ioffset + idx;
for(int j = 0; j < CWIDTH; j++)
{
joffset = j * CNUMBER;
OC[myoffset + joffset] = outputtemp[iioffset + j];
outputtemp[iioffset + j] = 0;
}
}
*/
}
float pointwisetemp;
//for(int k = 0; k < BLOCK_SIZE; k++)
//{
//if(idy == k)
//{
for(int j = 0; j < CHEIGHT / BLOCK_SIZE; j++)
for(int i = 0; i < CWIDTH; i++)
filteroutshared[idy][i][idx] = outputtemp[j * CWIDTH + i];
//}
__syncthreads();
/*
if(idx == 0 && idy == 0)
{
for(int i = 0; i < 16; i++)
{
for(int j = 0; j < 16; j++)
{
printf("%.1f ",filteroutshared[i][j]);
}
printf("\n");
}
}
*/
//if(idy == k)
//{
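    // Pointwise (1x1) stage: this thread produces output channel `idx` for every
    // column of its row `idy`.  The (idx + i) % CNUMBER stagger makes the threads
    // of a warp start at different channels, which appears intended to avoid
    // shared-memory bank conflicts.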
#pragma unroll
for(int j = 0; j < COUT; j++)
{
pointwisetemp = 0.0;
for(int i = 0; i < CWIDTH; i++)
{
pointwisetemp += filteroutshared[idy][j][(idx + i) % CNUMBER] * sepFilter[(idx + i) % CNUMBER];
}
SOC[idy * COUT * CWIDTH + j * COUT + idx] = pointwisetemp;
//if(idy == 0 && idx == 1)
//printf("%.f ",pointwisetemp);
}
//}
__syncthreads();
//}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
void cudaconvolute(float* IC, float* F, float* FP, float* SOC, float*** OC_cpu, float*** SOC_cpu)
{
float totalBytes_channel = sizeof(float) * CNUMBER * CHEIGHT * CWIDTH;
float totalBytes_filter = sizeof(float) * CNUMBER * FHEIGHT * FWIDTH;
float totalBytes_output = sizeof(float) * COUT * CHEIGHT * CWIDTH;
float totalBytes_sepfilter = sizeof(float) * CNUMBER * COUT;
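    // Data layouts inferred from the kernel's indexing (assumption):
    //   IC  : [CHEIGHT][CWIDTH][CNUMBER]  channel-interleaved input
    //   F   : [FHEIGHT][FWIDTH][CNUMBER]  per-channel 3x3 taps
    //   FP  : [COUT][CNUMBER]             pointwise mixing weights
    //   SOC : [CHEIGHT][CWIDTH][COUT]     output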
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 blocks(N/BLOCK_SIZE,N/BLOCK_SIZE);
float* device_IC;
float* device_F;
float* device_SOC;
float* device_FP;
hipMalloc(&device_IC, totalBytes_channel);
hipMalloc(&device_F, totalBytes_filter);
hipMalloc(&device_SOC, totalBytes_output);
hipMalloc(&device_FP, totalBytes_sepfilter);
hipMemcpy(device_IC, IC, totalBytes_channel, hipMemcpyHostToDevice);
hipMemcpy(device_F, F, totalBytes_filter, hipMemcpyHostToDevice);
hipMemcpy(device_FP, FP, totalBytes_sepfilter, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
double startKernelTime = CycleTimer::currentSeconds();
hipEventRecord(start);
hipLaunchKernelGGL(( convolute_kernel), dim3(1), dim3(threadsPerBlock), 0, 0, device_IC, device_F, device_FP, device_SOC);
hipEventRecord(stop);
hipDeviceSynchronize();
double endKernelTime = CycleTimer::currentSeconds();
hipMemcpy(SOC, device_SOC, totalBytes_output, hipMemcpyDeviceToHost);
double kernelDuration = endKernelTime - startKernelTime;
printf("KernelDuration: %.3f ms\n", 1000.f * kernelDuration);
float m = 0;
hipEventElapsedTime(&m, start, stop);
printf("CUDA Elapsed Time %f ms\n", m);
hipEventDestroy(start);
hipEventDestroy(stop);
bool equal = true;
double fops = 0.0;
for (int i = 0;i < CHEIGHT; i++){
for (int j = 0; j < CWIDTH; j++) {
for(int k = 0; k < COUT; k++) {
fops += SOC[i * CWIDTH * COUT + j * COUT + k];
if(SOC_cpu[k][i][j] != SOC[i * CWIDTH * COUT + j * COUT + k])
{
equal = false;
//printf("%d %d %d %.1f != %.1f\n", k , i, j, SOC_cpu[k][i][j], SOC[i * CWIDTH * COUT + j * COUT + k]);
//break;
}
printf("%0.1f ",SOC[i * CWIDTH * COUT + j * COUT + k]);
}
printf("\n");
}
printf("\n");
}
    printf("Throughput = %f GFLOP/s\n", ((2 * FHEIGHT * FWIDTH * CWIDTH * CHEIGHT * CNUMBER) + (2 * COUT * CHEIGHT * CWIDTH * CNUMBER)) / (m * 1000000));
if(equal)
printf("EQUAL\n");
else
printf("NOT EQUAL\n");
hipFree(device_IC);
hipFree(device_F);
hipFree(device_SOC);
hipFree(device_FP);
}
void
printCudaInfo() {
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
        printf("   Shared memory per block: %zu bytes\n", deviceProps.sharedMemPerBlock);
}
printf("---------------------------------------------------------\n");
}
| 1b1e28e93c6b39fee81df555bf1013c687cdfd3b.cu | // System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#define BLOCK_SIZE 16
#define CWIDTH 16
#define CHEIGHT 16
#define FWIDTH 3
#define FHEIGHT 3
#define CNUMBER 16
#define COUT 16
__shared__ float filteroutshared[CHEIGHT][CNUMBER][CNUMBER];
__global__ void convolute_kernel(float *IC, float*F, float*FP, float *SOC) {
int idx = threadIdx.x;
int idy = threadIdx.y;
//printf("hello\n");
int kCenterX = FWIDTH / 2;
int kCenterY = FHEIGHT / 2;
int filtersperthread = CNUMBER / BLOCK_SIZE;
int remaining = CNUMBER % BLOCK_SIZE;
float filtertemp[FHEIGHT * FWIDTH];
    float outputtemp[(CHEIGHT / BLOCK_SIZE) * CWIDTH] = {0.0f};  // zero-initialized: accumulated with += below
//printf("fil per t %d",filtersperthread);
//register float filtertemp[FHEIGHT * FWIDTH];
//register float outputtemp[CHEIGHT / BLOCK_SIZE * CWIDTH];
//printf("hello");
int chunk_size = CHEIGHT / BLOCK_SIZE;
int ilimits = idy * chunk_size;
int ioffset;
int joffset;
int myoffset;
int kchanneloffset;
int moffset;
int i_tmp;
int mm,nn;
int test;
int iioffset;
float sepFilter[CNUMBER];
for(int i = 0 ; i < CNUMBER; i++)
sepFilter[i] = FP[idx * CNUMBER + i];
for(int k = 0; k < filtersperthread; k++)
{
kchanneloffset = k * BLOCK_SIZE;
for(int i = 0; i < FHEIGHT; i++)
{
moffset = i * FWIDTH;
ioffset = i * CNUMBER * FWIDTH;
myoffset = kchanneloffset + ioffset + idx;
//ioffset = (k * FHEIGHT * FWIDTH * BLOCK_SIZE) + (idx * FHEIGHT * FWIDTH)
for(int j = 0; j < FWIDTH; j++)
{
joffset = j * CNUMBER;
filtertemp[moffset + j] = F[myoffset + joffset];
//printf("%.1f %.1f\n",filtertemp[k][i][j],F[k * FHEIGHT * FWIDTH * BLOCK_SIZE + idx%BLOCK_SIZE * FHEIGHT * FWIDTH + i * FWIDTH + j]);
}
}
for(int i = ilimits - kCenterY; i < ilimits + chunk_size + kCenterY; i++)
{
if(i >= 0 && i < CHEIGHT)
{
ioffset = i * CNUMBER * CWIDTH;
myoffset = kchanneloffset + ioffset + idx;
for(int j = 0; j < CWIDTH; j++)
{
joffset = j * CNUMBER;
                float input_element = IC[myoffset + joffset];  // IC holds floats; keep full precision
for (int m = 0; m < FHEIGHT; ++m) // kernel rows
{
mm = FHEIGHT - 1 - m;
int ii = (i - kCenterY) + mm;
iioffset = ((ii - ilimits) * CWIDTH);
moffset = m * FWIDTH;
for (int n = 0; n < FWIDTH; ++n) // kernel columns
{
nn = FWIDTH - 1 - n;
int jj = (j - kCenterX) + nn;
if ((jj >= 0 && jj < CWIDTH) && (ii >= ilimits && ii < (ilimits + chunk_size)))
{
//printf("%.1f\n", filtertemp[k][m][n] * IC[idx * CWIDTH * CWIDTH + i * CWIDTH + j]);
outputtemp[iioffset + jj] += filtertemp[moffset + n] * input_element;
}
//__syncthreads();
}
}
}
}
}
/*
for(int i = ilimits; i < ilimits + chunk_size; i++)
{
ioffset = i * CNUMBER * CWIDTH;
iioffset = ((i - ilimits) * CWIDTH);
myoffset = kchanneloffset + ioffset + idx;
for(int j = 0; j < CWIDTH; j++)
{
joffset = j * CNUMBER;
OC[myoffset + joffset] = outputtemp[iioffset + j];
outputtemp[iioffset + j] = 0;
}
}
*/
}
float pointwisetemp;
//for(int k = 0; k < BLOCK_SIZE; k++)
//{
//if(idy == k)
//{
for(int j = 0; j < CHEIGHT / BLOCK_SIZE; j++)
for(int i = 0; i < CWIDTH; i++)
filteroutshared[idy][i][idx] = outputtemp[j * CWIDTH + i];
//}
__syncthreads();
/*
if(idx == 0 && idy == 0)
{
for(int i = 0; i < 16; i++)
{
for(int j = 0; j < 16; j++)
{
printf("%.1f ",filteroutshared[i][j]);
}
printf("\n");
}
}
*/
//if(idy == k)
//{
#pragma unroll
for(int j = 0; j < COUT; j++)
{
pointwisetemp = 0.0;
for(int i = 0; i < CWIDTH; i++)
{
pointwisetemp += filteroutshared[idy][j][(idx + i) % CNUMBER] * sepFilter[(idx + i) % CNUMBER];
}
SOC[idy * COUT * CWIDTH + j * COUT + idx] = pointwisetemp;
//if(idy == 0 && idx == 1)
//printf("%.f ",pointwisetemp);
}
//}
__syncthreads();
//}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
void cudaconvolute(float* IC, float* F, float* FP, float* SOC, float*** OC_cpu, float*** SOC_cpu)
{
float totalBytes_channel = sizeof(float) * CNUMBER * CHEIGHT * CWIDTH;
float totalBytes_filter = sizeof(float) * CNUMBER * FHEIGHT * FWIDTH;
float totalBytes_output = sizeof(float) * COUT * CHEIGHT * CWIDTH;
float totalBytes_sepfilter = sizeof(float) * CNUMBER * COUT;
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 blocks(N/BLOCK_SIZE,N/BLOCK_SIZE);
float* device_IC;
float* device_F;
float* device_SOC;
float* device_FP;
cudaMalloc(&device_IC, totalBytes_channel);
cudaMalloc(&device_F, totalBytes_filter);
cudaMalloc(&device_SOC, totalBytes_output);
cudaMalloc(&device_FP, totalBytes_sepfilter);
cudaMemcpy(device_IC, IC, totalBytes_channel, cudaMemcpyHostToDevice);
cudaMemcpy(device_F, F, totalBytes_filter, cudaMemcpyHostToDevice);
cudaMemcpy(device_FP, FP, totalBytes_sepfilter, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
double startKernelTime = CycleTimer::currentSeconds();
cudaEventRecord(start);
convolute_kernel<<<1, threadsPerBlock>>>(device_IC, device_F, device_FP, device_SOC);
cudaEventRecord(stop);
cudaDeviceSynchronize();
double endKernelTime = CycleTimer::currentSeconds();
cudaMemcpy(SOC, device_SOC, totalBytes_output, cudaMemcpyDeviceToHost);
double kernelDuration = endKernelTime - startKernelTime;
printf("KernelDuration: %.3f ms\n", 1000.f * kernelDuration);
float m = 0;
cudaEventElapsedTime(&m, start, stop);
printf("CUDA Elapsed Time %f ms\n", m);
cudaEventDestroy(start);
cudaEventDestroy(stop);
bool equal = true;
double fops = 0.0;
for (int i = 0;i < CHEIGHT; i++){
for (int j = 0; j < CWIDTH; j++) {
for(int k = 0; k < COUT; k++) {
fops += SOC[i * CWIDTH * COUT + j * COUT + k];
if(SOC_cpu[k][i][j] != SOC[i * CWIDTH * COUT + j * COUT + k])
{
equal = false;
//printf("%d %d %d %.1f != %.1f\n", k , i, j, SOC_cpu[k][i][j], SOC[i * CWIDTH * COUT + j * COUT + k]);
//break;
}
printf("%0.1f ",SOC[i * CWIDTH * COUT + j * COUT + k]);
}
printf("\n");
}
printf("\n");
}
    printf("Throughput = %f GFLOP/s\n", ((2 * FHEIGHT * FWIDTH * CWIDTH * CHEIGHT * CNUMBER) + (2 * COUT * CHEIGHT * CWIDTH * CNUMBER)) / (m * 1000000));
if(equal)
printf("EQUAL\n");
else
printf("NOT EQUAL\n");
cudaFree(device_IC);
cudaFree(device_F);
cudaFree(device_SOC);
cudaFree(device_FP);
}
void
printCudaInfo() {
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
        printf("   Shared memory per block: %zu bytes\n", deviceProps.sharedMemPerBlock);
}
printf("---------------------------------------------------------\n");
}
|
5d3a5de6c16f6fb40c2990257e13d05255a29ec3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#define EIGEN_USE_GPU
#include "SamplingOpKernel.h"
#include "open3d/ml/Helper.h"
#include "open3d/ml/contrib/PointSampling.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::contrib;
using namespace tensorflow;
class FurthestPointSamplingOpKernelCUDA : public FurthestPointSamplingOpKernel {
public:
explicit FurthestPointSamplingOpKernelCUDA(
OpKernelConstruction *construction)
: FurthestPointSamplingOpKernel(construction) {}
void Kernel(tensorflow::OpKernelContext *context,
int b,
int n,
int m,
const float *dataset,
float *temp,
int *idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
// fill with big value
hipMemset(temp, 80, b * n * sizeof(float));
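        // Writing byte 0x50 into every byte makes each float read back as
        // 0x50505050 (roughly 1.4e10), i.e. a large initial value for each
        // point's distance-to-selected-set, which furthest point sampling
        // updates and then maximizes over when picking the next sample.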
auto stream = context->eigen_gpu_device().stream();
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
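        // The switch below picks a template instantiation whose compile-time
        // block size matches n_threads; the parameter presumably sizes the
        // kernel's shared-memory reduction buffers in PointSampling.cuh.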
switch (n_threads) {
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp,
idxs);
break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp,
idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp,
idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp,
idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp,
idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n",
hipGetErrorString(err));
exit(-1);
}
}
};
REGISTER_KERNEL_BUILDER(Name("Open3DFurthestPointSampling").Device(DEVICE_GPU),
FurthestPointSamplingOpKernelCUDA);
class GatherPointsOpKernelCUDA : public GatherPointsOpKernel {
public:
explicit GatherPointsOpKernelCUDA(OpKernelConstruction *construction)
: GatherPointsOpKernel(construction) {}
void Kernel(tensorflow::OpKernelContext *context,
int b,
int c,
int n,
int npoints,
const float *points,
const int *idx,
float *out) {
        // points: (B, C, N)
        // idx: (B, npoints)
        // output:
        //      out: (B, C, npoints)
auto stream = context->eigen_gpu_device().stream();
hipError_t err;
dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( gather_points_kernel), dim3(blocks), dim3(threads), 0, stream, b, c, n, npoints,
points, idx, out);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n",
hipGetErrorString(err));
exit(-1);
}
}
};
REGISTER_KERNEL_BUILDER(Name("Open3DGatherPoints").Device(DEVICE_GPU),
GatherPointsOpKernelCUDA);
class GatherPointsGradOpKernelCUDA : public GatherPointsGradOpKernel {
public:
explicit GatherPointsGradOpKernelCUDA(OpKernelConstruction *construction)
: GatherPointsGradOpKernel(construction) {}
void Kernel(tensorflow::OpKernelContext *context,
int b,
int c,
int n,
int npoints,
const float *grad_out,
const int *idx,
float *grad_points) {
// grad_out: (B, C, npoints)
// idx: (B, npoints)
// output:
// grad_points: (B, C, N)
hipMemset(grad_points, 0, b * c * n * sizeof(float));
auto stream = context->eigen_gpu_device().stream();
hipError_t err;
dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( gather_points_grad_kernel), dim3(blocks), dim3(threads), 0, stream,
b, c, n, npoints, grad_out, idx, grad_points);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n",
hipGetErrorString(err));
exit(-1);
}
}
};
REGISTER_KERNEL_BUILDER(Name("Open3DGatherPointsGrad").Device(DEVICE_GPU),
GatherPointsGradOpKernelCUDA);
| 5d3a5de6c16f6fb40c2990257e13d05255a29ec3.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#define EIGEN_USE_GPU
#include "SamplingOpKernel.h"
#include "open3d/ml/Helper.h"
#include "open3d/ml/contrib/PointSampling.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::contrib;
using namespace tensorflow;
class FurthestPointSamplingOpKernelCUDA : public FurthestPointSamplingOpKernel {
public:
explicit FurthestPointSamplingOpKernelCUDA(
OpKernelConstruction *construction)
: FurthestPointSamplingOpKernel(construction) {}
void Kernel(tensorflow::OpKernelContext *context,
int b,
int n,
int m,
const float *dataset,
float *temp,
int *idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
// fill with big value
cudaMemset(temp, 80, b * n * sizeof(float));
auto stream = context->eigen_gpu_device().stream();
cudaError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_kernel<1024>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp,
idxs);
break;
case 512:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp,
idxs);
break;
case 256:
furthest_point_sampling_kernel<256>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp,
idxs);
break;
case 128:
furthest_point_sampling_kernel<128>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp,
idxs);
break;
case 64:
furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 32:
furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 16:
furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 8:
furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 4:
furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 2:
furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 1:
furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
default:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp,
idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n",
cudaGetErrorString(err));
exit(-1);
}
}
};
REGISTER_KERNEL_BUILDER(Name("Open3DFurthestPointSampling").Device(DEVICE_GPU),
FurthestPointSamplingOpKernelCUDA);
class GatherPointsOpKernelCUDA : public GatherPointsOpKernel {
public:
explicit GatherPointsOpKernelCUDA(OpKernelConstruction *construction)
: GatherPointsOpKernel(construction) {}
void Kernel(tensorflow::OpKernelContext *context,
int b,
int c,
int n,
int npoints,
const float *points,
const int *idx,
float *out) {
        // points: (B, C, N)
        // idx: (B, npoints)
        // output:
        //      out: (B, C, npoints)
auto stream = context->eigen_gpu_device().stream();
cudaError_t err;
dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
gather_points_kernel<<<blocks, threads, 0, stream>>>(b, c, n, npoints,
points, idx, out);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n",
cudaGetErrorString(err));
exit(-1);
}
}
};
REGISTER_KERNEL_BUILDER(Name("Open3DGatherPoints").Device(DEVICE_GPU),
GatherPointsOpKernelCUDA);
class GatherPointsGradOpKernelCUDA : public GatherPointsGradOpKernel {
public:
explicit GatherPointsGradOpKernelCUDA(OpKernelConstruction *construction)
: GatherPointsGradOpKernel(construction) {}
void Kernel(tensorflow::OpKernelContext *context,
int b,
int c,
int n,
int npoints,
const float *grad_out,
const int *idx,
float *grad_points) {
// grad_out: (B, C, npoints)
// idx: (B, npoints)
// output:
// grad_points: (B, C, N)
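        // grad_points is zeroed first because the kernel scatters/accumulates the incoming gradients into the positions given by idx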
cudaMemset(grad_points, 0, b * c * n * sizeof(float));
auto stream = context->eigen_gpu_device().stream();
cudaError_t err;
dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
gather_points_grad_kernel<<<blocks, threads, 0, stream>>>(
b, c, n, npoints, grad_out, idx, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n",
cudaGetErrorString(err));
exit(-1);
}
}
};
REGISTER_KERNEL_BUILDER(Name("Open3DGatherPointsGrad").Device(DEVICE_GPU),
GatherPointsGradOpKernelCUDA);
|
743728ae02ea98f118595d9796322ef60218d308.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/data_norm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;
using platform::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T>
__global__ void KernelDataNormFF(int N, int C, const T *x, T *y, const T *mean,
const T *scale) {
CUDA_KERNEL_LOOP(i, N * C) {
int col = i % C;
y[i] = (x[i] - mean[col]) * scale[col];
}
}
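// Converts the running summaries into per-feature normalization parameters: mean = sum / count, scale = sqrt(count / squared_sum).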
template <typename T>
__global__ void KernelMeanScale(int C, const T *batch_size, const T *batch_sum,
const T *batch_square_sum, T *mean, T *scale) {
CUDA_KERNEL_LOOP(i, C) {
mean[i] = batch_sum[i] / batch_size[i];
scale[i] = sqrt(batch_size[i] / batch_square_sum[i]);
}
}
template <typename T>
__global__ void KernelDataNormBP(int N, int C, const T *y_grad, const T *scale,
T *x_grad) {
CUDA_KERNEL_LOOP(i, N * C) { x_grad[i] = y_grad[i] * scale[i % C]; }
}
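// Recomputes per-feature statistics for this batch from the raw input: a unit count, the mean value, and the mean squared deviation plus epsilon.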
template <typename T>
__global__ void KernelDataNormBPStat(int N, int C, const T *x_val,
const T *means,
const float squared_sum_epsilon,
T *batch_size, T *batch_sum,
T *batch_square_sum) {
CUDA_KERNEL_LOOP(i, C) {
T val_sum = 0;
T square_sum = 0;
for (int j = 0; j < N; j++) {
val_sum += x_val[j * C + i];
square_sum +=
(x_val[j * C + i] - means[i]) * (x_val[j * C + i] - means[i]);
}
batch_size[i] = 1;
batch_sum[i] = val_sum / N;
batch_square_sum[i] = square_sum / N + squared_sum_epsilon;
}
}
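// Folds the freshly computed batch statistics into the running summaries with exponential decay.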
template <typename T>
__global__ void KernelUpdateParam(int C, const T *d_batch_size,
const T *d_batch_sum,
const T *d_batch_square_sum, T *batch_size,
T *batch_sum, T *batch_square_sum,
const float decay_rate) {
CUDA_KERNEL_LOOP(i, C) {
batch_size[i] = batch_size[i] * decay_rate + d_batch_size[i];
batch_sum[i] = batch_sum[i] * decay_rate + d_batch_sum[i];
batch_square_sum[i] =
batch_square_sum[i] * decay_rate + d_batch_square_sum[i];
}
}
template <typename T>
class DataNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
const T *batch_size_in = ctx.Input<Tensor>("BatchSize")->data<T>();
const T *batch_sum_in = ctx.Input<Tensor>("BatchSum")->data<T>();
const T *batch_square_sum_in =
ctx.Input<Tensor>("BatchSquareSum")->data<T>();
auto *x_data = x->data<T>();
// alloc memory
T *y_data = ctx.Output<Tensor>("Y")->mutable_data<T>(ctx.GetPlace());
T *mean_out_data =
ctx.Output<Tensor>("Means")->mutable_data<T>(ctx.GetPlace());
T *scale_out_data =
ctx.Output<Tensor>("Scales")->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( KernelMeanScale), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
C, batch_size_in, batch_sum_in, batch_square_sum_in, mean_out_data,
scale_out_data);
hipLaunchKernelGGL(( KernelDataNormFF), dim3(GET_BLOCKS(C * N)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
N, C, x_data, y_data, mean_out_data, scale_out_data);
}
};
template <typename T>
class DataNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scales = ctx.Input<Tensor>("Scales");
const auto *means = ctx.Input<Tensor>("Means");
const float epsilon = ctx.Attr<float>("epsilon");
const float dr = ctx.Attr<float>("summary_decay_rate");
const bool need_sync_stats = ctx.Attr<bool>("sync_stats");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
// init output
Tensor *d_x = nullptr;
if (ctx.HasOutput(framework::GradVarName("X"))) {
d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
}
T *d_batch_size = ctx.Output<Tensor>(framework::GradVarName("BatchSize"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_sum = ctx.Output<Tensor>(framework::GradVarName("BatchSum"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_square_sum =
ctx.Output<Tensor>(framework::GradVarName("BatchSquareSum"))
->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
if (d_x != nullptr) {
hipLaunchKernelGGL(( KernelDataNormBP), dim3(GET_BLOCKS(C * N)), dim3(PADDLE_CUDA_NUM_THREADS), 0,
stream, N, C, d_y->data<T>(), scales->data<T>(),
d_x->mutable_data<T>(ctx.GetPlace()));
}
hipLaunchKernelGGL(( KernelDataNormBPStat), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
N, C, x->data<T>(), means->data<T>(), epsilon, d_batch_size,
d_batch_sum, d_batch_square_sum);
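    // With sync_stats enabled, all-reduce the batch statistics across ranks so every GPU updates its summaries with the same global values.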
if (need_sync_stats) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace());
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_size),
reinterpret_cast<void *>(d_batch_size), C,
platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())),
ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_sum),
reinterpret_cast<void *>(d_batch_sum), C,
platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())),
ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())),
ncclSum, comm->comm(), stream));
platform::GpuStreamSync(stream);
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats connot be "
"supported on windows now."));
#endif
}
T *batch_size_data =
ctx.Output<Tensor>("BatchSize")->mutable_data<T>(ctx.GetPlace());
T *batch_sum_data =
ctx.Output<Tensor>("BatchSum")->mutable_data<T>(ctx.GetPlace());
T *batch_square_sum_data =
ctx.Output<Tensor>("BatchSquareSum")->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( KernelUpdateParam), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
C, d_batch_size, d_batch_sum, d_batch_square_sum, batch_size_data,
batch_sum_data, batch_square_sum_data, dr);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
data_norm, ops::DataNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
data_norm_grad,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, double>);
| 743728ae02ea98f118595d9796322ef60218d308.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/data_norm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;
using platform::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T>
__global__ void KernelDataNormFF(int N, int C, const T *x, T *y, const T *mean,
const T *scale) {
CUDA_KERNEL_LOOP(i, N * C) {
int col = i % C;
y[i] = (x[i] - mean[col]) * scale[col];
}
}
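// Converts the running summaries into per-feature normalization parameters: mean = sum / count, scale = sqrt(count / squared_sum).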
template <typename T>
__global__ void KernelMeanScale(int C, const T *batch_size, const T *batch_sum,
const T *batch_square_sum, T *mean, T *scale) {
CUDA_KERNEL_LOOP(i, C) {
mean[i] = batch_sum[i] / batch_size[i];
scale[i] = sqrt(batch_size[i] / batch_square_sum[i]);
}
}
template <typename T>
__global__ void KernelDataNormBP(int N, int C, const T *y_grad, const T *scale,
T *x_grad) {
CUDA_KERNEL_LOOP(i, N * C) { x_grad[i] = y_grad[i] * scale[i % C]; }
}
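// Recomputes per-feature statistics for this batch from the raw input: a unit count, the mean value, and the mean squared deviation plus epsilon.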
template <typename T>
__global__ void KernelDataNormBPStat(int N, int C, const T *x_val,
const T *means,
const float squared_sum_epsilon,
T *batch_size, T *batch_sum,
T *batch_square_sum) {
CUDA_KERNEL_LOOP(i, C) {
T val_sum = 0;
T square_sum = 0;
for (int j = 0; j < N; j++) {
val_sum += x_val[j * C + i];
square_sum +=
(x_val[j * C + i] - means[i]) * (x_val[j * C + i] - means[i]);
}
batch_size[i] = 1;
batch_sum[i] = val_sum / N;
batch_square_sum[i] = square_sum / N + squared_sum_epsilon;
}
}
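// Folds the freshly computed batch statistics into the running summaries with exponential decay.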
template <typename T>
__global__ void KernelUpdateParam(int C, const T *d_batch_size,
const T *d_batch_sum,
const T *d_batch_square_sum, T *batch_size,
T *batch_sum, T *batch_square_sum,
const float decay_rate) {
CUDA_KERNEL_LOOP(i, C) {
batch_size[i] = batch_size[i] * decay_rate + d_batch_size[i];
batch_sum[i] = batch_sum[i] * decay_rate + d_batch_sum[i];
batch_square_sum[i] =
batch_square_sum[i] * decay_rate + d_batch_square_sum[i];
}
}
template <typename T>
class DataNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
const T *batch_size_in = ctx.Input<Tensor>("BatchSize")->data<T>();
const T *batch_sum_in = ctx.Input<Tensor>("BatchSum")->data<T>();
const T *batch_square_sum_in =
ctx.Input<Tensor>("BatchSquareSum")->data<T>();
auto *x_data = x->data<T>();
// alloc memory
T *y_data = ctx.Output<Tensor>("Y")->mutable_data<T>(ctx.GetPlace());
T *mean_out_data =
ctx.Output<Tensor>("Means")->mutable_data<T>(ctx.GetPlace());
T *scale_out_data =
ctx.Output<Tensor>("Scales")->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
KernelMeanScale<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
C, batch_size_in, batch_sum_in, batch_square_sum_in, mean_out_data,
scale_out_data);
KernelDataNormFF<<<GET_BLOCKS(C * N), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
N, C, x_data, y_data, mean_out_data, scale_out_data);
}
};
template <typename T>
class DataNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scales = ctx.Input<Tensor>("Scales");
const auto *means = ctx.Input<Tensor>("Means");
const float epsilon = ctx.Attr<float>("epsilon");
const float dr = ctx.Attr<float>("summary_decay_rate");
const bool need_sync_stats = ctx.Attr<bool>("sync_stats");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
// init output
Tensor *d_x = nullptr;
if (ctx.HasOutput(framework::GradVarName("X"))) {
d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
}
T *d_batch_size = ctx.Output<Tensor>(framework::GradVarName("BatchSize"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_sum = ctx.Output<Tensor>(framework::GradVarName("BatchSum"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_square_sum =
ctx.Output<Tensor>(framework::GradVarName("BatchSquareSum"))
->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
if (d_x != nullptr) {
KernelDataNormBP<<<GET_BLOCKS(C * N), PADDLE_CUDA_NUM_THREADS, 0,
stream>>>(N, C, d_y->data<T>(), scales->data<T>(),
d_x->mutable_data<T>(ctx.GetPlace()));
}
KernelDataNormBPStat<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
N, C, x->data<T>(), means->data<T>(), epsilon, d_batch_size,
d_batch_sum, d_batch_square_sum);
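    // With sync_stats enabled, all-reduce the batch statistics across ranks so every GPU updates its summaries with the same global values.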
if (need_sync_stats) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace());
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_size),
reinterpret_cast<void *>(d_batch_size), C,
platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())),
ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_sum),
reinterpret_cast<void *>(d_batch_sum), C,
platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())),
ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())),
ncclSum, comm->comm(), stream));
platform::GpuStreamSync(stream);
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats connot be "
"supported on windows now."));
#endif
}
T *batch_size_data =
ctx.Output<Tensor>("BatchSize")->mutable_data<T>(ctx.GetPlace());
T *batch_sum_data =
ctx.Output<Tensor>("BatchSum")->mutable_data<T>(ctx.GetPlace());
T *batch_square_sum_data =
ctx.Output<Tensor>("BatchSquareSum")->mutable_data<T>(ctx.GetPlace());
KernelUpdateParam<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
C, d_batch_size, d_batch_sum, d_batch_square_sum, batch_size_data,
batch_sum_data, batch_square_sum_data, dr);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
data_norm, ops::DataNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
data_norm_grad,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, double>);
|
b9478e514ef57e4f585e2061fe10250519ce821a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan.cuh"
#include <iostream>
using std::cout;
using std::endl;
using std::printf;
/*
1. The main hillis_steele algorithm that will work on each block independently
*/
__global__ void hillis_steele(const float *Din, float *Dout, unsigned int n,
float *blockSums, unsigned int total_size) {
extern __shared__ float temp[];
unsigned int thid = threadIdx.x;
unsigned int pout = 0, pin = 1;
unsigned int arrIdx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (arrIdx < total_size) {
// Load data from device to shared memory
temp[thid] = (arrIdx == 0) ? 0 : Din[arrIdx - 1];
__syncthreads();
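    // Hillis-Steele doubling: each step adds the value `offset` slots to the left, ping-ponging between the two halves of the shared buffer.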
for (int offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset) {
temp[pout * n + thid] =
temp[pin * n + thid] + temp[pin * n + thid - offset];
} else {
temp[pout * n + thid] = temp[pin * n + thid];
}
__syncthreads();
}
    // If this is the last thread in its block, write out that block's total
    // (thid == total_size - 1) has to be checked for when total elements < threads_per_block
if ((thid == (blockDim.x - 1)) || (thid == total_size - 1))
blockSums[blockIdx.x] = temp[pout * n + thid];
// Copy data back to device memory
Dout[arrIdx] = temp[pout * n + thid];
}
}
/*
2. Defining an auxiliary kernel to scan the block sums
This may be incorporated in the main hillis_steele kernel
*/
__global__ void block_hillis_steele(const float *Din, float *Dout,
unsigned int n) {
extern __shared__ float temp[];
unsigned int thid = threadIdx.x;
unsigned int pout = 0, pin = 1;
  if (thid < n) {
temp[thid] = (thid == 0) ? 0 : Din[thid - 1];
__syncthreads();
for (int offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset) {
temp[pout * n + thid] =
temp[pin * n + thid] + temp[pin * n + thid - offset];
} else {
temp[pout * n + thid] = temp[pin * n + thid];
}
__syncthreads();
}
// Copy data back to device memory
Dout[thid] = temp[pout * n + thid];
}
}
/*
3. Adds the scanned sum of all preceding blocks to every element of the output
*/
__global__ void block_adder_kernel(float *Dout, const float *blockSums,
unsigned int total_size) {
unsigned int arrIdx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (arrIdx < total_size)
Dout[arrIdx] += blockSums[blockIdx.x];
__syncthreads();
}
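// Host-side driver: (1) scan each block independently, (2) scan the per-block totals, (3) add each block's offset to its elements.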
__host__ void scan(const float *in, float *out, unsigned int n,
unsigned int threads_per_block) {
float *Din, *Dout, *blockSums, *blockSums_out;
unsigned int shMemSize = (2 * threads_per_block) * sizeof(float);
hipError_t err = hipSuccess;
unsigned int n_blocks;
// Allocate space for device: Din
err = hipMalloc((void **)&Din, n * sizeof(float));
if (err != hipSuccess)
cout << "hipMalloc Failed for Input Array! Error: " << err << endl;
err = hipMalloc((void **)&Dout, n * sizeof(float));
if (err != hipSuccess)
cout << "hipMalloc Failed for Output Array! Error: " << err << endl;
// We need a separate array to store block sums
n_blocks = (n + (threads_per_block - 1)) / threads_per_block;
err = hipMalloc((void **)&blockSums, n_blocks * sizeof(float));
if (err != hipSuccess)
cout << "hipMalloc Failed for BlockSums Array! Error: " << err << endl;
  hipMemset(blockSums, 0, n_blocks * sizeof(float)); // Zero the per-block sums
// Copying the data to device
hipMemcpy(Din, in, n * sizeof(float), hipMemcpyHostToDevice);
  // First pass: scan each block (up to threads_per_block elements) independently
hipLaunchKernelGGL(( hillis_steele), dim3(n_blocks), dim3(threads_per_block), shMemSize, 0,
Din, Dout, threads_per_block, blockSums, n);
err = hipMalloc((void **)&blockSums_out, n_blocks * sizeof(float));
if (err != hipSuccess)
cout << "hipMalloc Failed for BlockSums_Out Array! Error: " << err << endl;
  // Run the Hillis-Steele scan again, this time on the per-block sums.
  // With a block size of 1024 we can never have n_blocks > 1024, because we
  // assume n <= threads_per_block * threads_per_block.
hipLaunchKernelGGL(( block_hillis_steele), dim3(1), dim3(threads_per_block), shMemSize, 0,
blockSums, blockSums_out, n_blocks);
hipLaunchKernelGGL(( block_adder_kernel), dim3(n_blocks), dim3(threads_per_block), 0, 0, Dout, blockSums_out, n);
// Copy results back to the host
hipMemcpy(out, Dout, n * sizeof(float), hipMemcpyDeviceToHost);
// Wait for GPU to finish
hipDeviceSynchronize();
// Cleanup
hipFree(Din);
hipFree(Dout);
hipFree(blockSums);
hipFree(blockSums_out);
}
| b9478e514ef57e4f585e2061fe10250519ce821a.cu | #include "scan.cuh"
#include <iostream>
using std::cout;
using std::endl;
using std::printf;
/*
1. The main hillis_steele algorithm that will work on each block independently
*/
__global__ void hillis_steele(const float *Din, float *Dout, unsigned int n,
float *blockSums, unsigned int total_size) {
extern __shared__ float temp[];
unsigned int thid = threadIdx.x;
unsigned int pout = 0, pin = 1;
unsigned int arrIdx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (arrIdx < total_size) {
// Load data from device to shared memory
temp[thid] = (arrIdx == 0) ? 0 : Din[arrIdx - 1];
__syncthreads();
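    // Hillis-Steele doubling: each step adds the value `offset` slots to the left, ping-ponging between the two halves of the shared buffer.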
for (int offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset) {
temp[pout * n + thid] =
temp[pin * n + thid] + temp[pin * n + thid - offset];
} else {
temp[pout * n + thid] = temp[pin * n + thid];
}
__syncthreads();
}
    // If this is the last thread in its block, write out that block's total
    // (thid == total_size - 1) has to be checked for when total elements < threads_per_block
if ((thid == (blockDim.x - 1)) || (thid == total_size - 1))
blockSums[blockIdx.x] = temp[pout * n + thid];
// Copy data back to device memory
Dout[arrIdx] = temp[pout * n + thid];
}
}
/*
2. Defining an auxilliary kernel ot accumulate block sums
This may be incorporated in the main hillis_steele kernel
*/
__global__ void block_hillis_steele(const float *Din, float *Dout,
unsigned int n) {
extern __shared__ float temp[];
unsigned int thid = threadIdx.x;
unsigned int pout = 0, pin = 1;
  if (thid < n) {
temp[thid] = (thid == 0) ? 0 : Din[thid - 1];
__syncthreads();
for (int offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset) {
temp[pout * n + thid] =
temp[pin * n + thid] + temp[pin * n + thid - offset];
} else {
temp[pout * n + thid] = temp[pin * n + thid];
}
__syncthreads();
}
// Copy data back to device memory
Dout[thid] = temp[pout * n + thid];
}
}
/*
3. Adds the scanned sum of all preceding blocks to every element of the output
*/
__global__ void block_adder_kernel(float *Dout, const float *blockSums,
unsigned int total_size) {
unsigned int arrIdx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (arrIdx < total_size)
Dout[arrIdx] += blockSums[blockIdx.x];
__syncthreads();
}
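// Host-side driver: (1) scan each block independently, (2) scan the per-block totals, (3) add each block's offset to its elements.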
__host__ void scan(const float *in, float *out, unsigned int n,
unsigned int threads_per_block) {
float *Din, *Dout, *blockSums, *blockSums_out;
unsigned int shMemSize = (2 * threads_per_block) * sizeof(float);
cudaError_t err = cudaSuccess;
unsigned int n_blocks;
// Allocate space for device: Din
err = cudaMalloc((void **)&Din, n * sizeof(float));
if (err != cudaSuccess)
cout << "cudaMalloc Failed for Input Array! Error: " << err << endl;
err = cudaMalloc((void **)&Dout, n * sizeof(float));
if (err != cudaSuccess)
cout << "cudaMalloc Failed for Output Array! Error: " << err << endl;
// We need a separate array to store block sums
n_blocks = (n + (threads_per_block - 1)) / threads_per_block;
err = cudaMalloc((void **)&blockSums, n_blocks * sizeof(float));
if (err != cudaSuccess)
cout << "cudaMalloc Failed for BlockSums Array! Error: " << err << endl;
  cudaMemset(blockSums, 0, n_blocks * sizeof(float)); // Zero the per-block sums
// Copying the data to device
cudaMemcpy(Din, in, n * sizeof(float), cudaMemcpyHostToDevice);
  // First pass: scan each block (up to threads_per_block elements) independently
hillis_steele<<<n_blocks, threads_per_block, shMemSize>>>(
Din, Dout, threads_per_block, blockSums, n);
err = cudaMalloc((void **)&blockSums_out, n_blocks * sizeof(float));
if (err != cudaSuccess)
cout << "cudaMalloc Failed for BlockSums_Out Array! Error: " << err << endl;
  // Run the Hillis-Steele scan again, this time on the per-block sums.
  // With a block size of 1024 we can never have n_blocks > 1024, because we
  // assume n <= threads_per_block * threads_per_block.
block_hillis_steele<<<1, threads_per_block, shMemSize>>>(
blockSums, blockSums_out, n_blocks);
block_adder_kernel<<<n_blocks, threads_per_block>>>(Dout, blockSums_out, n);
// Copy results back to the host
cudaMemcpy(out, Dout, n * sizeof(float), cudaMemcpyDeviceToHost);
// Wait for GPU to finish
cudaDeviceSynchronize();
// Cleanup
cudaFree(Din);
cudaFree(Dout);
cudaFree(blockSums);
cudaFree(blockSums_out);
}
|
8b99cdf09bdc18a6a3f07c554f6f71984b6e26da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaUtils_hip.cuh"
#include "TotalVariation.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
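// Per-pixel variation term: lambda1 weights the first-order forward difference and lambda2 the second-order central difference,
// evaluated along x and y wherever the pixel is not on the corresponding image border.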
__global__ void differenceKernel(
uint8* image,
double* diff,
int height, int width, int channels,
double lambda1, double lambda2
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= width) { return; }
int y = blockIdx.y * blockDim.y + threadIdx.y;
int ch = blockIdx.z * blockDim.z + threadIdx.z;
int idx = y * width * channels + x * channels + ch;
double difference = 0;
double center = (double)image[idx];
if (x > 0 && x < width - 1) {
double plusX = (double)image[idx + channels];
double minusX = (double)image[idx - channels];
difference += lambda1 * abs(plusX - center)
+ lambda2 * abs(minusX - 2.0 * center + plusX);
}
if (y > 0 && y < height - 1) {
int yShift = width * channels;
double plusY = (double)image[idx + yShift];
double minusY = (double)image[idx - yShift];
difference += lambda1 * abs(plusY - center)
+ lambda2 * abs(minusY - 2.0 * center + plusY);
}
diff[idx] = difference;
}
double TotalGeneralizedVariationCUDA(
uint8* image,
int height, int width, int channels,
double lambda1, double lambda2,
double& elapsed,
double& kernelElapsed
) {
double* devDifference = NULL;
uint8* devInputImage = NULL;
long imageSizeInBytes = height * width * channels;
CudaTimer timer = CudaTimer();
hipMalloc((void**)&devInputImage, imageSizeInBytes);
hipMalloc((void**)&devDifference, imageSizeInBytes * sizeof(double));
hipMemcpy(devInputImage, image, imageSizeInBytes, hipMemcpyHostToDevice);
int blockSize = min(1024, width);
dim3 gridSize((width + blockSize - 1) / blockSize, height, channels);
CudaTimer kernelTimer = CudaTimer();
SAFE_KERNEL_CALL((
hipLaunchKernelGGL(( differenceKernel) , dim3(gridSize), dim3(blockSize), 0, 0,
devInputImage, devDifference, height, width, channels, lambda1, lambda2
)
));
kernelElapsed = kernelTimer.stop();
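    // Sum every per-pixel contribution on the device to obtain the scalar variation value.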
double result = thrust::reduce(
thrust::device,
devDifference,
devDifference + imageSizeInBytes,
0.0,
thrust::plus<double>()
);
hipFree(devInputImage);
hipFree(devDifference);
elapsed = timer.stop();
return result;
} | 8b99cdf09bdc18a6a3f07c554f6f71984b6e26da.cu | #include "CudaUtils.cuh"
#include "TotalVariation.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
__global__ void differenceKernel(
uint8* image,
double* diff,
int height, int width, int channels,
double lambda1, double lambda2
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= width) { return; }
int y = blockIdx.y * blockDim.y + threadIdx.y;
int ch = blockIdx.z * blockDim.z + threadIdx.z;
int idx = y * width * channels + x * channels + ch;
double difference = 0;
double center = (double)image[idx];
if (x > 0 && x < width - 1) {
double plusX = (double)image[idx + channels];
double minusX = (double)image[idx - channels];
difference += lambda1 * abs(plusX - center)
+ lambda2 * abs(minusX - 2.0 * center + plusX);
}
if (y > 0 && y < height - 1) {
int yShift = width * channels;
double plusY = (double)image[idx + yShift];
double minusY = (double)image[idx - yShift];
difference += lambda1 * abs(plusY - center)
+ lambda2 * abs(minusY - 2.0 * center + plusY);
}
diff[idx] = difference;
}
double TotalGeneralizedVariationCUDA(
uint8* image,
int height, int width, int channels,
double lambda1, double lambda2,
double& elapsed,
double& kernelElapsed
) {
double* devDifference = NULL;
uint8* devInputImage = NULL;
long imageSizeInBytes = height * width * channels;
CudaTimer timer = CudaTimer();
cudaMalloc((void**)&devInputImage, imageSizeInBytes);
cudaMalloc((void**)&devDifference, imageSizeInBytes * sizeof(double));
cudaMemcpy(devInputImage, image, imageSizeInBytes, cudaMemcpyHostToDevice);
int blockSize = min(1024, width);
dim3 gridSize((width + blockSize - 1) / blockSize, height, channels);
CudaTimer kernelTimer = CudaTimer();
SAFE_KERNEL_CALL((
differenceKernel <<<gridSize, blockSize>>> (
devInputImage, devDifference, height, width, channels, lambda1, lambda2
)
));
kernelElapsed = kernelTimer.stop();
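    // Sum every per-pixel contribution on the device to obtain the scalar variation value.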
double result = thrust::reduce(
thrust::device,
devDifference,
devDifference + imageSizeInBytes,
0.0,
thrust::plus<double>()
);
cudaFree(devInputImage);
cudaFree(devDifference);
elapsed = timer.stop();
return result;
} |
83533f113a9b0b0ec990ca6a698b43f89c8a6233.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace fastertransformer {
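// Adds the vocabulary bias to each row of logits, forces finished sequences to emit end_id, and converts the row into
// log-probabilities; when ALIVE is true the finished flags are ignored.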
template <typename T, bool ALIVE>
__global__ void update_logits_kernel(T* logits,
const T* bias,
const int end_id,
const bool* finished,
const int n) {
int bid = blockIdx.x;
bool finish = ALIVE ? false : finished[bid];
int offset = bid * n;
float max_val = -1 * FLT_MAX;
__shared__ float s_max_val;
__shared__ float s_sum_val;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
if (finish)
logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
else
logits[offset + tid] += bias[tid];
max_val = max(max_val, logits[offset + tid]);
}
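  // Numerically stable log-softmax: reduce the row maximum, exponentiate the shifted logits, reduce their sum, then take the log.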
max_val = blockReduceMax<float>((float)max_val);
if (threadIdx.x == 0) s_max_val = max_val;
__syncthreads();
float sum_val = 0.0f;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val);
sum_val += (float)logits[offset + tid];
}
sum_val = blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) s_sum_val = sum_val;
__syncthreads();
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
logits[offset + tid] = logf((float)logits[offset + tid] / s_sum_val);
}
}
void update_logits_v2(float* logits,
const float* bias,
const int end_id,
const bool* finished,
const int m,
const int n,
hipStream_t stream) {
dim3 grid(m);
dim3 block(min(n, 1024));
/*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big.
*/
hipLaunchKernelGGL(( update_logits_kernel<float, true>), dim3(grid), dim3(block), 0, stream,
logits, bias, end_id, finished, n);
}
} // namespace fastertransformer | 83533f113a9b0b0ec990ca6a698b43f89c8a6233.cu | /*
* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace fastertransformer {
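// Adds the vocabulary bias to each row of logits, forces finished sequences to emit end_id, and converts the row into
// log-probabilities; when ALIVE is true the finished flags are ignored.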
template <typename T, bool ALIVE>
__global__ void update_logits_kernel(T* logits,
const T* bias,
const int end_id,
const bool* finished,
const int n) {
int bid = blockIdx.x;
bool finish = ALIVE ? false : finished[bid];
int offset = bid * n;
float max_val = -1 * FLT_MAX;
__shared__ float s_max_val;
__shared__ float s_sum_val;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
if (finish)
logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
else
logits[offset + tid] += bias[tid];
max_val = max(max_val, logits[offset + tid]);
}
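  // Numerically stable log-softmax: reduce the row maximum, exponentiate the shifted logits, reduce their sum, then take the log.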
max_val = blockReduceMax<float>((float)max_val);
if (threadIdx.x == 0) s_max_val = max_val;
__syncthreads();
float sum_val = 0.0f;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val);
sum_val += (float)logits[offset + tid];
}
sum_val = blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) s_sum_val = sum_val;
__syncthreads();
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
logits[offset + tid] = logf((float)logits[offset + tid] / s_sum_val);
}
}
void update_logits_v2(float* logits,
const float* bias,
const int end_id,
const bool* finished,
const int m,
const int n,
cudaStream_t stream) {
dim3 grid(m);
dim3 block(min(n, 1024));
/*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big.
*/
update_logits_kernel<float, true><<<grid, block, 0, stream>>>(
logits, bias, end_id, finished, n);
}
} // namespace fastertransformer |
50871ce100787008edcd370b781b0e8c25be6929.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include "cublas_utils.h"
using data_type = double;
int main(int argc, char *argv[]) {
hipblasHandle_t cublasH = NULL;
hipStream_t stream = NULL;
/*
* A = | 1.0 2.0 3.0 4.0 |
* B = | 0.0 0.0 0.0 0.0 |
*/
const std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0};
std::vector<data_type> B(A.size(), 0);
const int incx = 1;
const int incy = 1;
data_type *d_A = nullptr;
data_type *d_B = nullptr;
printf("A\n");
print_vector(A.size(), A.data());
printf("=====\n");
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(hipblasCreate(&cublasH));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUBLAS_CHECK(hipblasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size()));
CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice,
stream));
CUDA_CHECK(hipMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), hipMemcpyHostToDevice,
stream));
/* step 3: compute */
CUBLAS_CHECK(hipblasDcopy(cublasH, A.size(), d_A, incx, d_B, incy));
/* step 4: copy data to host */
CUDA_CHECK(hipMemcpyAsync(B.data(), d_B, sizeof(data_type) * B.size(), hipMemcpyDeviceToHost,
stream));
CUDA_CHECK(hipStreamSynchronize(stream));
/*
* B = | 1.0 2.0 3.0 4.0 |
*/
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* free resources */
CUDA_CHECK(hipFree(d_A));
CUDA_CHECK(hipFree(d_B));
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 50871ce100787008edcd370b781b0e8c25be6929.cu | /*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include "cublas_utils.h"
using data_type = double;
int main(int argc, char *argv[]) {
cublasHandle_t cublasH = NULL;
cudaStream_t stream = NULL;
/*
* A = | 1.0 2.0 3.0 4.0 |
* B = | 0.0 0.0 0.0 0.0 |
*/
const std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0};
std::vector<data_type> B(A.size(), 0);
const int incx = 1;
const int incy = 1;
data_type *d_A = nullptr;
data_type *d_B = nullptr;
printf("A\n");
print_vector(A.size(), A.data());
printf("=====\n");
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(cublasCreate(&cublasH));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUBLAS_CHECK(cublasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size()));
CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice,
stream));
CUDA_CHECK(cudaMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), cudaMemcpyHostToDevice,
stream));
/* step 3: compute */
CUBLAS_CHECK(cublasDcopy(cublasH, A.size(), d_A, incx, d_B, incy));
/* step 4: copy data to host */
CUDA_CHECK(cudaMemcpyAsync(B.data(), d_B, sizeof(data_type) * B.size(), cudaMemcpyDeviceToHost,
stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
/*
* B = | 1.0 2.0 3.0 4.0 |
*/
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* free resources */
CUDA_CHECK(cudaFree(d_A));
CUDA_CHECK(cudaFree(d_B));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
a371141b48b7f2be3c47b61215436bb939989aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
#include "stdlib.h"
#include <string.h>
#include "algorithmCudaNormal.h"
#include "algorithmCudaNormalInternal.h"
namespace AlgorithmCudaNormal
{
#if 0
} // indent guard
#endif
int NUM_STREAM = NUM_STREAM_MAX;
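// Allocates the source/destination matrices. Depending on the build flags the host buffers are zero-copy (mapped), pinned,
// or plain pageable memory; separate device buffers are allocated only when zero-copy is off.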
void cudaInitialize(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height)
{
NUM_STREAM = height / BLOCK_SIZE_H;
if (NUM_STREAM > NUM_STREAM_MAX) NUM_STREAM = NUM_STREAM_MAX;
#if defined(USE_ZEROCOPY_MEMORY)
#if defined(USE_PINNED_MEMORY)
CHECK(hipHostMalloc((void**)¶m->hostMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), hipHostMallocMapped));
CHECK(hipHostMalloc((void**)¶m->hostMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), hipHostMallocMapped));
#else
CHECK(hipHostMalloc((void**)¶m->hostMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), hipHostMallocMapped));
CHECK(hipHostMalloc((void**)¶m->hostMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), hipHostMallocMapped));
#endif
#elif defined(USE_PINNED_MEMORY)
CHECK(hipHostMalloc((void**)¶m->hostMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
CHECK(hipHostMalloc((void**)¶m->hostMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
#else
param->hostMatSrc = new int[(width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN)];
param->hostMatDst = new int[(width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN)];
#endif
#ifdef USE_ZEROCOPY_MEMORY
CHECK(hipHostGetDevicePointer((void**)¶m->devMatSrc, (void*)param->hostMatSrc, 0));
CHECK(hipHostGetDevicePointer((void**)¶m->devMatDst, (void*)param->hostMatDst, 0));
#else
CHECK(hipMalloc((void**)¶m->devMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
CHECK(hipMalloc((void**)¶m->devMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
#endif
param->isMatrixUpdated = 1;
for (int i = 0; i < NUM_STREAM; i++) {
hipStream_t *stream;
stream = new hipStream_t;
CHECK(hipStreamCreate(stream));
param->pStream[i] = (void*)stream;
}
}
void cudaFinalize(ALGORITHM_CUDA_NORMAL_PARAM *param)
{
for (int i = 0; i < NUM_STREAM; i++) {
hipStream_t *stream = (hipStream_t*)(param->pStream[i]);
CHECK(hipStreamDestroy(*stream));
delete stream;
}
#if !defined(USE_ZEROCOPY_MEMORY)
CHECK(hipFree(param->devMatSrc));
CHECK(hipFree(param->devMatDst));
#endif
#if defined(USE_PINNED_MEMORY) || defined(USE_ZEROCOPY_MEMORY)
CHECK(hipHostFree(param->hostMatSrc));
CHECK(hipHostFree(param->hostMatDst));
#else
	delete[] param->hostMatSrc;
	delete[] param->hostMatDst;
#endif
	/* todo: call this at the very end of the application */
//CHECK(hipDeviceReset());
}
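// Runs the algorithm variant selected at compile time; the _REPEAT and _AUTO variants handle all repetitions inside a single call and break out of the loop.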
void cudaProcess(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height, int repeatNum)
{
for (int i = 0; i < repeatNum; i++) {
#if defined(ALGORITHM_0)
extern void process_0(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_0(param, width, height);
#elif defined(ALGORITHM_0_STREAM)
extern void process_0_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_0_stream(param, width, height);
#elif defined(ALGORITHM_1)
extern void process_1(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_1(param, width, height);
#elif defined(ALGORITHM_2)
extern void process_2(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_2(param, width, height);
#elif defined(ALGORITHM_2_STREAM)
extern void process_2_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_2_stream(param, width, height);
#elif defined(ALGORITHM_3_STREAM)
extern void process_3_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_3_stream(param, width, height);
#elif defined(ALGORITHM_3_REPEAT)
extern void process_3_repeat(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height, int repeatNum);
process_3_repeat(param, width, height, repeatNum);
break;
#elif defined(ALGORITHM_3_AUTO)
extern void process_3_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
extern void process_3_repeat(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height, int repeatNum);
if (repeatNum == 1) {
process_3_stream(param, width, height);
} else {
process_3_repeat(param, width, height, repeatNum);
}
break;
#endif
}
}
void swapMat(ALGORITHM_CUDA_NORMAL_PARAM *param)
{
int *temp;
temp = param->devMatDst;
param->devMatDst = param->devMatSrc;
param->devMatSrc = temp;
temp = param->hostMatDst;
param->hostMatDst = param->hostMatSrc;
param->hostMatSrc = temp;
}
/*
* Don't use hipMallocManaged
* Memory access exception occurs when I call logicForOneGeneration from several threads
*/
void cudaAllocManaged(int **p, int size)
{
hipMallocManaged(p, size);
}
void cudaFreeManaged(int *p)
{
hipFree(p);
}
void cudaDeviceSynchronizeWrapper()
{
hipDeviceSynchronize();
}
}
| a371141b48b7f2be3c47b61215436bb939989aa8.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
#include "stdlib.h"
#include <string.h>
#include "algorithmCudaNormal.h"
#include "algorithmCudaNormalInternal.h"
namespace AlgorithmCudaNormal
{
#if 0
} // indent guard
#endif
int NUM_STREAM = NUM_STREAM_MAX;
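// Allocates the source/destination matrices. Depending on the build flags the host buffers are zero-copy (mapped), pinned,
// or plain pageable memory; separate device buffers are allocated only when zero-copy is off.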
void cudaInitialize(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height)
{
NUM_STREAM = height / BLOCK_SIZE_H;
if (NUM_STREAM > NUM_STREAM_MAX) NUM_STREAM = NUM_STREAM_MAX;
#if defined(USE_ZEROCOPY_MEMORY)
#if defined(USE_PINNED_MEMORY)
CHECK(cudaMallocHost((void**)¶m->hostMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), cudaHostAllocMapped));
CHECK(cudaMallocHost((void**)¶m->hostMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), cudaHostAllocMapped));
#else
CHECK(cudaHostAlloc((void**)¶m->hostMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), cudaHostAllocMapped));
CHECK(cudaHostAlloc((void**)¶m->hostMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int), cudaHostAllocMapped));
#endif
#elif defined(USE_PINNED_MEMORY)
CHECK(cudaMallocHost((void**)¶m->hostMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
CHECK(cudaMallocHost((void**)¶m->hostMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
#else
param->hostMatSrc = new int[(width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN)];
param->hostMatDst = new int[(width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN)];
#endif
#ifdef USE_ZEROCOPY_MEMORY
CHECK(cudaHostGetDevicePointer((void**)¶m->devMatSrc, (void*)param->hostMatSrc, 0));
CHECK(cudaHostGetDevicePointer((void**)¶m->devMatDst, (void*)param->hostMatDst, 0));
#else
CHECK(cudaMalloc((void**)¶m->devMatSrc, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
CHECK(cudaMalloc((void**)¶m->devMatDst, (width + 2 * MEMORY_MARGIN) * (height + 2 * MEMORY_MARGIN) * sizeof(int)));
#endif
param->isMatrixUpdated = 1;
for (int i = 0; i < NUM_STREAM; i++) {
cudaStream_t *stream;
stream = new cudaStream_t;
CHECK(cudaStreamCreate(stream));
param->pStream[i] = (void*)stream;
}
}
void cudaFinalize(ALGORITHM_CUDA_NORMAL_PARAM *param)
{
for (int i = 0; i < NUM_STREAM; i++) {
cudaStream_t *stream = (cudaStream_t*)(param->pStream[i]);
CHECK(cudaStreamDestroy(*stream));
delete stream;
}
#if !defined(USE_ZEROCOPY_MEMORY)
CHECK(cudaFree(param->devMatSrc));
CHECK(cudaFree(param->devMatDst));
#endif
#if defined(USE_PINNED_MEMORY) || defined(USE_ZEROCOPY_MEMORY)
CHECK(cudaFreeHost(param->hostMatSrc));
CHECK(cudaFreeHost(param->hostMatDst));
#else
	delete[] param->hostMatSrc;
	delete[] param->hostMatDst;
#endif
	/* todo: call this at the very end of the application */
//CHECK(cudaDeviceReset());
}
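// Runs the algorithm variant selected at compile time; the _REPEAT and _AUTO variants handle all repetitions inside a single call and break out of the loop.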
void cudaProcess(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height, int repeatNum)
{
for (int i = 0; i < repeatNum; i++) {
#if defined(ALGORITHM_0)
extern void process_0(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_0(param, width, height);
#elif defined(ALGORITHM_0_STREAM)
extern void process_0_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_0_stream(param, width, height);
#elif defined(ALGORITHM_1)
extern void process_1(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_1(param, width, height);
#elif defined(ALGORITHM_2)
extern void process_2(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_2(param, width, height);
#elif defined(ALGORITHM_2_STREAM)
extern void process_2_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_2_stream(param, width, height);
#elif defined(ALGORITHM_3_STREAM)
extern void process_3_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
process_3_stream(param, width, height);
#elif defined(ALGORITHM_3_REPEAT)
extern void process_3_repeat(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height, int repeatNum);
process_3_repeat(param, width, height, repeatNum);
break;
#elif defined(ALGORITHM_3_AUTO)
extern void process_3_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height);
extern void process_3_repeat(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height, int repeatNum);
if (repeatNum == 1) {
process_3_stream(param, width, height);
} else {
process_3_repeat(param, width, height, repeatNum);
}
break;
#endif
}
}
void swapMat(ALGORITHM_CUDA_NORMAL_PARAM *param)
{
int *temp;
temp = param->devMatDst;
param->devMatDst = param->devMatSrc;
param->devMatSrc = temp;
temp = param->hostMatDst;
param->hostMatDst = param->hostMatSrc;
param->hostMatSrc = temp;
}
/*
* Don't use cudaMallocManaged
* Memory access exception occurs when I call logicForOneGeneration from several threads
*/
void cudaAllocManaged(int **p, int size)
{
cudaMallocManaged(p, size);
}
void cudaFreeManaged(int *p)
{
cudaFree(p);
}
void cudaDeviceSynchronizeWrapper()
{
cudaDeviceSynchronize();
}
}
|
eaa3b3eb7d9489d5566772620e38823167d06fd0.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_image.h>
// convert floating point rgba color to 32-bit integer
__device__ unsigned int rgbaFloatToInt(float4 rgba) {
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return ((unsigned int)(rgba.w * 255.0f) << 24) |
((unsigned int)(rgba.z * 255.0f) << 16) |
((unsigned int)(rgba.y * 255.0f) << 8) |
((unsigned int)(rgba.x * 255.0f));
}
////////////////////////////////////////////////////////////////////////////////
//! Rotate an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
static __global__ void transformKernel(unsigned int *outputData, int width,
int height, float theta,
hipTextureObject_t tex) {
// calculate normalized texture coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float u = (float)x - (float)width / 2;
float v = (float)y - (float)height / 2;
float tu = u * cosf(theta) - v * sinf(theta);
float tv = v * cosf(theta) + u * sinf(theta);
tu /= (float)width;
tv /= (float)height;
// read from texture and write to global memory
float4 pix = tex2D<float4>(tex, tu + 0.5f, tv + 0.5f);
unsigned int pixelInt = rgbaFloatToInt(pix);
outputData[y * width + x] = pixelInt;
}
static __global__ void rgbToGrayscaleKernel(unsigned int *rgbaImage,
size_t imageWidth,
size_t imageHeight) {
size_t gidX = blockDim.x * blockIdx.x + threadIdx.x;
uchar4 *pixArray = (uchar4 *)rgbaImage;
for (int pixId = gidX; pixId < imageWidth * imageHeight;
pixId += gridDim.x * blockDim.x) {
uchar4 dataA = pixArray[pixId];
unsigned char grayscale =
(unsigned char)(dataA.x * 0.3 + dataA.y * 0.59 + dataA.z * 0.11);
uchar4 dataB = make_uchar4(grayscale, grayscale, grayscale, 0);
pixArray[pixId] = dataB;
}
}
void launchGrayScaleKernel(unsigned int *d_rgbaImage,
std::string image_filename, size_t imageWidth,
size_t imageHeight, hipStream_t stream) {
int numThreadsPerBlock = 1024;
int numOfBlocks = (imageWidth * imageHeight) / numThreadsPerBlock;
hipLaunchKernelGGL(( rgbToGrayscaleKernel), dim3(numOfBlocks), dim3(numThreadsPerBlock), 0, stream,
d_rgbaImage, imageWidth, imageHeight);
unsigned int *outputData =
(unsigned int *)malloc(sizeof(unsigned int) * imageWidth * imageHeight);
checkCudaErrors(hipMemcpyAsync(
outputData, d_rgbaImage, sizeof(unsigned int) * imageWidth * imageHeight,
hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
char outputFilename[1024];
strcpy(outputFilename, image_filename.c_str());
strcpy(outputFilename + image_filename.length() - 4, "_out.ppm");
sdkSavePPM4ub(outputFilename, (unsigned char *)outputData, imageWidth,
imageHeight);
printf("Wrote '%s'\n", outputFilename);
free(outputData);
}
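/*
 * Added note (not part of the original file): rgbToGrayscaleKernel uses a grid-stride
 * loop, so the truncating division used for numOfBlocks above still covers every pixel;
 * the leftover pixels are handled on the loop's second pass. For images smaller than one
 * block (imageWidth * imageHeight < 1024), a rounded-up count such as
 *     int numOfBlocks = (imageWidth * imageHeight + numThreadsPerBlock - 1) / numThreadsPerBlock;
 * would avoid launching zero blocks.
 */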
void rotateKernel(hipTextureObject_t &texObj, const float angle,
unsigned int *d_outputData, const int imageWidth,
const int imageHeight, hipStream_t stream) {
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(imageWidth / dimBlock.x, imageHeight / dimBlock.y, 1);
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, d_outputData, imageWidth,
imageHeight, angle, texObj);
}
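/*
 * Illustrative host-side setup, not part of the original file: rotateKernel expects a
 * float4 texture sampled with normalized coordinates and linear filtering
 * (tex2D<float4> with tu/tv in [0, 1]). A minimal way to build such a texture object
 * from a host float4 buffer is sketched below; the helper name and the address/filter
 * mode choices are assumptions, and error checking is omitted for brevity.
 */
static hipTextureObject_t createRotationTexture(const float4 *h_rgba, int width, int height) {
  hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
  hipArray_t srcArray = nullptr;
  hipMallocArray(&srcArray, &channelDesc, width, height);
  hipMemcpy2DToArray(srcArray, 0, 0, h_rgba, width * sizeof(float4),
                     width * sizeof(float4), height, hipMemcpyHostToDevice);

  hipResourceDesc resDesc = {};
  resDesc.resType = hipResourceTypeArray;
  resDesc.res.array.array = srcArray;

  hipTextureDesc texDesc = {};
  texDesc.addressMode[0] = hipAddressModeWrap;  // wrap so samples rotated outside the image repeat
  texDesc.addressMode[1] = hipAddressModeWrap;
  texDesc.filterMode = hipFilterModeLinear;     // bilinear filtering for smooth rotation
  texDesc.readMode = hipReadModeElementType;    // float4 texels are returned unconverted
  texDesc.normalizedCoords = 1;                 // matches tu/tv in [0, 1] used by transformKernel

  hipTextureObject_t texObj = 0;
  hipCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
  return texObj;
}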
| eaa3b3eb7d9489d5566772620e38823167d06fd0.cu | /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cuda.h>
#include <helper_cuda.h>
#include <helper_image.h>
// convert floating point rgba color to 32-bit integer
__device__ unsigned int rgbaFloatToInt(float4 rgba) {
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return ((unsigned int)(rgba.w * 255.0f) << 24) |
((unsigned int)(rgba.z * 255.0f) << 16) |
((unsigned int)(rgba.y * 255.0f) << 8) |
((unsigned int)(rgba.x * 255.0f));
}
////////////////////////////////////////////////////////////////////////////////
//! Rotate an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
static __global__ void transformKernel(unsigned int *outputData, int width,
int height, float theta,
cudaTextureObject_t tex) {
// calculate normalized texture coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float u = (float)x - (float)width / 2;
float v = (float)y - (float)height / 2;
float tu = u * cosf(theta) - v * sinf(theta);
float tv = v * cosf(theta) + u * sinf(theta);
tu /= (float)width;
tv /= (float)height;
// read from texture and write to global memory
float4 pix = tex2D<float4>(tex, tu + 0.5f, tv + 0.5f);
unsigned int pixelInt = rgbaFloatToInt(pix);
outputData[y * width + x] = pixelInt;
}
static __global__ void rgbToGrayscaleKernel(unsigned int *rgbaImage,
size_t imageWidth,
size_t imageHeight) {
size_t gidX = blockDim.x * blockIdx.x + threadIdx.x;
uchar4 *pixArray = (uchar4 *)rgbaImage;
for (int pixId = gidX; pixId < imageWidth * imageHeight;
pixId += gridDim.x * blockDim.x) {
uchar4 dataA = pixArray[pixId];
unsigned char grayscale =
(unsigned char)(dataA.x * 0.3 + dataA.y * 0.59 + dataA.z * 0.11);
uchar4 dataB = make_uchar4(grayscale, grayscale, grayscale, 0);
pixArray[pixId] = dataB;
}
}
void launchGrayScaleKernel(unsigned int *d_rgbaImage,
std::string image_filename, size_t imageWidth,
size_t imageHeight, cudaStream_t stream) {
int numThreadsPerBlock = 1024;
int numOfBlocks = (imageWidth * imageHeight) / numThreadsPerBlock;
rgbToGrayscaleKernel<<<numOfBlocks, numThreadsPerBlock, 0, stream>>>(
d_rgbaImage, imageWidth, imageHeight);
unsigned int *outputData =
(unsigned int *)malloc(sizeof(unsigned int) * imageWidth * imageHeight);
checkCudaErrors(cudaMemcpyAsync(
outputData, d_rgbaImage, sizeof(unsigned int) * imageWidth * imageHeight,
cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
char outputFilename[1024];
strcpy(outputFilename, image_filename.c_str());
strcpy(outputFilename + image_filename.length() - 4, "_out.ppm");
sdkSavePPM4ub(outputFilename, (unsigned char *)outputData, imageWidth,
imageHeight);
printf("Wrote '%s'\n", outputFilename);
free(outputData);
}
void rotateKernel(cudaTextureObject_t &texObj, const float angle,
unsigned int *d_outputData, const int imageWidth,
const int imageHeight, cudaStream_t stream) {
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(imageWidth / dimBlock.x, imageHeight / dimBlock.y, 1);
transformKernel<<<dimGrid, dimBlock, 0, stream>>>(d_outputData, imageWidth,
imageHeight, angle, texObj);
}
|
6b53c2ade390b0ef0e1104a0a7ecec473d1c4081.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/pooling.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads, const T* input_data,
T* output_data, const int channels,
const int input_height, const int input_width,
const int output_height, const int output_width,
const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width,
const int padding_height, const int padding_width,
PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
pool_process.compute(ele, input_data[h * input_width + w]);
}
}
int pool_size = (hend - hstart) * (wend - wstart);
pool_process.finalize(ele, (static_cast<T>(pool_size)));
output_data[index] = ele;
}
}
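/*
 * Added note (not part of the original source): the modulo/division chain at the top of
 * KernelPool2D unflattens a linear NCHW output index. For example, with output_width = 4,
 * output_height = 3 and channels = 2, index = 29 gives pw = 29 % 4 = 1,
 * ph = (29 / 4) % 3 = 1, c = (29 / 4 / 3) % 2 = 0 and batch_idx = 29 / 4 / 3 / 2 = 1.
 */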
template <typename PoolProcess, typename T>
__global__ void KernelPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int offsetW = index % input_width + padding_width;
int offsetH = (index / input_width) % input_height + padding_height;
int offsetC = (index / input_width / input_height) % channels;
int batch_idx = index / input_width / input_height / channels;
int phstart = (offsetH < ksize_height)
? 0
: (offsetH - ksize_height) / stride_height + 1;
int pwstart = (offsetW < ksize_width)
? 0
: (offsetW - ksize_width) / stride_width + 1;
int phend = min(offsetH / stride_height + 1, output_height);
int pwend = min(offsetW / stride_width + 1, output_width);
T gradient = 0;
T input = input_data[index];
int output_idx =
(batch_idx * channels + offsetC) * output_height * output_width;
output_data += output_idx;
output_grad += output_idx;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (hend - hstart) * (wend - wstart);
int output_sub_idx = ph * output_width + pw;
pool_process.compute(input, output_data[output_sub_idx],
output_grad[output_sub_idx], gradient,
static_cast<T>(1.0 / pool_size));
}
}
input_grad[index] = gradient;
}
}
template <typename T>
__global__ void KernelMaxPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
input_grad += (batch_idx * channels + c) * input_height * input_width;
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
if (ele == input_data[h * input_width + w]) {
maxIndex = h * input_width + w;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
}
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
std::vector<int>& ksize, std::vector<int>& strides,
std::vector<int>& paddings, PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool2D<
PoolProcess,
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(), nthreads, input_data, output_data, input_channels,
input_height, input_width, output_height,
output_width, ksize_height, ksize_width,
stride_height, stride_width, padding_height,
padding_width, pool_process);
}
};
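/*
 * Illustrative only (not part of the original source): the index ranges used by
 * KernelPool2D imply the usual floor-based output size per spatial dimension.
 * The helper name below is hypothetical.
 */
inline int PoolOutputSizeExample(int input, int ksize, int stride, int padding) {
  // e.g. input = 7, ksize = 3, stride = 2, padding = 1 -> (7 + 2 - 3) / 2 + 1 = 4
  return (input + 2 * padding - ksize) / stride + 1;
}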
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool2DGrad<
PoolProcess,
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_height, input_width, output_height, output_width,
ksize_height, ksize_width, stride_height, stride_width, padding_height,
padding_width, pool_process);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool2DGrad<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_height, input_width, output_height, output_width,
ksize_height, ksize_width, stride_height, stride_width, padding_height,
padding_width);
}
};
template class MaxPool2dGradFunctor<platform::GPUPlace, float>;
template class MaxPool2dGradFunctor<platform::GPUPlace, double>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(
const int nthreads, const T* input_data, T* output_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = pool_process.initial();
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
pool_process.compute(
ele, input_data[(d * input_height + h) * input_width + w]);
}
}
}
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
pool_process.finalize(ele, static_cast<T>(pool_size));
output_data[index] = ele;
}
}
template <typename PoolProcess, typename T>
__global__ void KernelPool3DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int offsetW = index % input_width + padding_width;
int offsetH = (index / input_width) % input_height + padding_height;
int offsetD =
(index / input_width / input_height) % input_depth + padding_depth;
int offsetC = (index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pdstart = (offsetD < ksize_depth)
? 0
: (offsetD - ksize_depth) / stride_depth + 1;
int phstart = (offsetH < ksize_height)
? 0
: (offsetH - ksize_height) / stride_height + 1;
int pwstart = (offsetW < ksize_width)
? 0
: (offsetW - ksize_width) / stride_width + 1;
int pdend = min((offsetD) / stride_depth + 1, output_depth);
int phend = min((offsetH) / stride_height + 1, output_height);
int pwend = min((offsetW) / stride_width + 1, output_width);
T gradient = 0;
T input = input_data[index];
int output_idx = (batch_idx * channels + offsetC) * output_depth *
output_height * output_width;
output_data += output_idx;
output_grad += output_idx;
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
int output_sub_idx = (pd * output_height + ph) * output_width + pw;
pool_process.compute(input, output_data[output_sub_idx],
output_grad[output_sub_idx], gradient,
static_cast<T>(1.0 / pool_size));
}
}
}
input_grad[index] = gradient;
}
}
template <typename T>
__global__ void KernelMaxPool3DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = output_data[index];
bool stop = false;
int maxIdx = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
input_grad +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend && !stop; ++d) {
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
if (ele == input_data[(d * input_height + h) * input_width + w]) {
stop = true;
maxIdx = (d * input_height + h) * input_width + w;
}
}
}
}
if (maxIdx != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
}
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
std::vector<int>& ksize, std::vector<int>& strides,
std::vector<int>& paddings, PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<
PoolProcess,
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_data, output_data, input_channels, input_depth,
input_height, input_width, output_depth, output_height, output_width,
ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width,
pool_process);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3DGrad<
PoolProcess,
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_depth, input_height, input_width, output_depth,
output_height, output_width, ksize_depth, ksize_height, ksize_width,
stride_depth, stride_height, stride_width, padding_depth,
padding_height, padding_width, pool_process);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <class T>
class MaxPool3dGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DGrad<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_depth, input_height, input_width, output_depth,
output_height, output_width, ksize_depth, ksize_height, ksize_width,
stride_depth, stride_height, stride_width, padding_depth,
padding_height, padding_width);
}
};
template class MaxPool3dGradFunctor<platform::GPUPlace, float>;
template class MaxPool3dGradFunctor<platform::GPUPlace, double>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
template <typename T>
__global__ void KernelMaxPool2dWithIdx(
const int nthreads, const T* input_data, T* output_data, T* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
T ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * input_width + w;
if (ele < input_data[input_index]) {
max_index = input_index;
ele = input_data[input_index];
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
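/*
 * Added note (not part of the original source): mask_data stores, for each output element,
 * the flattened position (h * input_width + w) of the maximum inside its own input feature
 * map, cast to T. KernelMaxPool2DWithIdxGrad below compares against exactly this encoding
 * when routing gradients back to the argmax location.
 */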
template <typename T>
__global__ void KernelMaxPool2DWithIdxGrad(
const int nthreads, T* input_grad, const T* output_grad, const T* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int c_offset = (index / input_width / input_height) % channels;
int batch_idx = index / input_width / input_height / channels;
int ph_start =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
int pw_start =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
int ph_end =
min((h_offset + padding_height) / stride_height + 1, output_height);
int pw_end =
min((w_offset + padding_width) / stride_width + 1, output_width);
T gradient = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
int output_idx =
(batch_idx * channels + c_offset) * output_height * output_width;
mask_data += output_idx;
output_grad += output_idx;
for (int ph = ph_start; ph < ph_end; ++ph) {
for (int pw = pw_start; pw < pw_end; ++pw) {
if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
gradient += output_grad[ph * output_width + pw];
}
}
input_grad[index] = gradient;
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
T* mask_data = mask.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(), nthreads, input_data, output_data, mask_data,
input_channels, input_height, input_width,
output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width,
padding_height, padding_width);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
framework::Tensor& input_grad,
const framework::Tensor& output_grad,
const framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input_grad.dims()[0];
const int input_channels = input_grad.dims()[1];
const int input_height = input_grad.dims()[2];
const int input_width = input_grad.dims()[3];
const int output_height = output_grad.dims()[2];
const int output_width = output_grad.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* mask_data = mask.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(), nthreads, input_grad_data, output_grad_data,
mask_data, input_channels, input_height,
input_width, output_height, output_width,
ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width);
}
};
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, float>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, float>;
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, double>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, double>;
template <typename T>
__global__ void KernelMaxPool3DWithIdx(
const int nthreads, const T* input_data, T* output_data, T* mask_data,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (ele < input_data[(d * input_height + h) * input_width + w]) {
max_index = (d * input_height + h) * input_width + w;
ele = input_data[max_index];
}
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T>
__global__ void KernelMaxPool3DWithIdxGrad(
const int nthreads, T* input_grad, const T* output_grad, const T* mask,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int d_offset = (index / input_width / input_height) % input_depth;
int c_offset =
(index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pd_start =
(d_offset + padding_depth < ksize_depth)
? 0
: (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
int ph_start =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
int pw_start =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
int pd_end =
min((d_offset + padding_depth) / stride_depth + 1, output_depth);
int ph_end =
min((h_offset + padding_height) / stride_height + 1, output_height);
int pw_end =
min((w_offset + padding_width) / stride_width + 1, output_width);
T gradient = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
mask += output_idx;
output_grad += output_idx;
for (int pd = pd_start; pd < pd_end; ++pd) {
for (int ph = ph_start; ph < ph_end; ++ph) {
for (int pw = pw_start; pw < pw_end; ++pw) {
if (mask[(pd * output_height + ph) * output_width + pw] ==
input_current_feature_map_idx)
gradient +=
output_grad[(pd * output_height + ph) * output_width + pw];
}
}
}
input_grad[index] = gradient;
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
T* mask_data = mask.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_data, output_data, mask_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
framework::Tensor& input_grad,
const framework::Tensor& output_grad,
const framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input_grad.dims()[0];
const int input_channels = input_grad.dims()[1];
const int input_depth = input_grad.dims()[2];
const int input_height = input_grad.dims()[3];
const int input_width = input_grad.dims()[4];
const int output_depth = output_grad.dims()[2];
const int output_height = output_grad.dims()[3];
const int output_width = output_grad.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* output_grad_data = output_grad.data<T>();
const T* mask_data = mask.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
nthreads, input_grad_data, output_grad_data, mask_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width);
}
};
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, float>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, float>;
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, double>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 6b53c2ade390b0ef0e1104a0a7ecec473d1c4081.cu | /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/pooling.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads, const T* input_data,
T* output_data, const int channels,
const int input_height, const int input_width,
const int output_height, const int output_width,
const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width,
const int padding_height, const int padding_width,
PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
pool_process.compute(ele, input_data[h * input_width + w]);
}
}
int pool_size = (hend - hstart) * (wend - wstart);
pool_process.finalize(ele, (static_cast<T>(pool_size)));
output_data[index] = ele;
}
}
template <typename PoolProcess, typename T>
__global__ void KernelPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int offsetW = index % input_width + padding_width;
int offsetH = (index / input_width) % input_height + padding_height;
int offsetC = (index / input_width / input_height) % channels;
int batch_idx = index / input_width / input_height / channels;
int phstart = (offsetH < ksize_height)
? 0
: (offsetH - ksize_height) / stride_height + 1;
int pwstart = (offsetW < ksize_width)
? 0
: (offsetW - ksize_width) / stride_width + 1;
int phend = min(offsetH / stride_height + 1, output_height);
int pwend = min(offsetW / stride_width + 1, output_width);
T gradient = 0;
T input = input_data[index];
int output_idx =
(batch_idx * channels + offsetC) * output_height * output_width;
output_data += output_idx;
output_grad += output_idx;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (hend - hstart) * (wend - wstart);
int output_sub_idx = ph * output_width + pw;
pool_process.compute(input, output_data[output_sub_idx],
output_grad[output_sub_idx], gradient,
static_cast<T>(1.0 / pool_size));
}
}
input_grad[index] = gradient;
}
}
template <typename T>
__global__ void KernelMaxPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
input_grad += (batch_idx * channels + c) * input_height * input_width;
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
if (ele == input_data[h * input_width + w]) {
maxIndex = h * input_width + w;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
}
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
std::vector<int>& ksize, std::vector<int>& strides,
std::vector<int>& paddings, PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool2D<
PoolProcess,
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, input_data, output_data, input_channels,
input_height, input_width, output_height,
output_width, ksize_height, ksize_width,
stride_height, stride_width, padding_height,
padding_width, pool_process);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool2DGrad<
PoolProcess,
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_height, input_width, output_height, output_width,
ksize_height, ksize_width, stride_height, stride_width, padding_height,
padding_width, pool_process);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool2DGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_height, input_width, output_height, output_width,
ksize_height, ksize_width, stride_height, stride_width, padding_height,
padding_width);
}
};
template class MaxPool2dGradFunctor<platform::GPUPlace, float>;
template class MaxPool2dGradFunctor<platform::GPUPlace, double>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(
const int nthreads, const T* input_data, T* output_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = pool_process.initial();
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
pool_process.compute(
ele, input_data[(d * input_height + h) * input_width + w]);
}
}
}
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
pool_process.finalize(ele, static_cast<T>(pool_size));
output_data[index] = ele;
}
}
template <typename PoolProcess, typename T>
__global__ void KernelPool3DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
PoolProcess pool_process) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int offsetW = index % input_width + padding_width;
int offsetH = (index / input_width) % input_height + padding_height;
int offsetD =
(index / input_width / input_height) % input_depth + padding_depth;
int offsetC = (index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
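// Map this input element (offsets include padding) back to the range of
// output windows whose pooling regions cover it.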
int pdstart = (offsetD < ksize_depth)
? 0
: (offsetD - ksize_depth) / stride_depth + 1;
int phstart = (offsetH < ksize_height)
? 0
: (offsetH - ksize_height) / stride_height + 1;
int pwstart = (offsetW < ksize_width)
? 0
: (offsetW - ksize_width) / stride_width + 1;
int pdend = min((offsetD) / stride_depth + 1, output_depth);
int phend = min((offsetH) / stride_height + 1, output_height);
int pwend = min((offsetW) / stride_width + 1, output_width);
T gradient = 0;
T input = input_data[index];
int output_idx = (batch_idx * channels + offsetC) * output_depth *
output_height * output_width;
output_data += output_idx;
output_grad += output_idx;
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
int output_sub_idx = (pd * output_height + ph) * output_width + pw;
pool_process.compute(input, output_data[output_sub_idx],
output_grad[output_sub_idx], gradient,
static_cast<T>(1.0 / pool_size));
}
}
}
input_grad[index] = gradient;
}
}
template <typename T>
__global__ void KernelMaxPool3DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = output_data[index];
bool stop = false;
int maxIdx = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
input_grad +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend && !stop; ++d) {
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
if (ele == input_data[(d * input_height + h) * input_width + w]) {
stop = true;
maxIdx = (d * input_height + h) * input_width + w;
}
}
}
}
if (maxIdx != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
}
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
std::vector<int>& ksize, std::vector<int>& strides,
std::vector<int>& paddings, PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool3D<
PoolProcess,
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, input_channels, input_depth,
input_height, input_width, output_depth, output_height, output_width,
ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width,
pool_process);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::GPUPlace, PoolProcess, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool3DGrad<
PoolProcess,
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_depth, input_height, input_width, output_depth,
output_height, output_width, ksize_depth, ksize_height, ksize_width,
stride_depth, stride_height, stride_width, padding_depth,
padding_height, padding_width, pool_process);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <class T>
class MaxPool3dGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_depth, input_height, input_width, output_depth,
output_height, output_width, ksize_depth, ksize_height, ksize_width,
stride_depth, stride_height, stride_width, padding_depth,
padding_height, padding_width);
}
};
template class MaxPool3dGradFunctor<platform::GPUPlace, float>;
template class MaxPool3dGradFunctor<platform::GPUPlace, double>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::GPUPlace,
paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
template <typename T>
__global__ void KernelMaxPool2dWithIdx(
const int nthreads, const T* input_data, T* output_data, T* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
T ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * input_width + w;
if (ele < input_data[input_index]) {
max_index = input_index;
ele = input_data[input_index];
}
}
}
output_data[index] = ele;
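// Record the argmax as a flattened offset within the input feature map,
// stored in the mask tensor as type T.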
mask_data[index] = max_index;
}
}
template <typename T>
__global__ void KernelMaxPool2DWithIdxGrad(
const int nthreads, T* input_grad, const T* output_grad, const T* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int c_offset = (index / input_width / input_height) % channels;
int batch_idx = index / input_width / input_height / channels;
int ph_start =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
int pw_start =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
int ph_end =
min((h_offset + padding_height) / stride_height + 1, output_height);
int pw_end =
min((w_offset + padding_width) / stride_width + 1, output_width);
T gradient = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
int output_idx =
(batch_idx * channels + c_offset) * output_height * output_width;
mask_data += output_idx;
output_grad += output_idx;
for (int ph = ph_start; ph < ph_end; ++ph) {
for (int pw = pw_start; pw < pw_end; ++pw) {
if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
gradient += output_grad[ph * output_width + pw];
}
}
input_grad[index] = gradient;
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
T* mask_data = mask.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool2dWithIdx<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, input_data, output_data, mask_data,
input_channels, input_height, input_width,
output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width,
padding_height, padding_width);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
framework::Tensor& input_grad,
const framework::Tensor& output_grad,
const framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input_grad.dims()[0];
const int input_channels = input_grad.dims()[1];
const int input_height = input_grad.dims()[2];
const int input_width = input_grad.dims()[3];
const int output_height = output_grad.dims()[2];
const int output_width = output_grad.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* mask_data = mask.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool2DWithIdxGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, input_grad_data, output_grad_data,
mask_data, input_channels, input_height,
input_width, output_height, output_width,
ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width);
}
};
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, float>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, float>;
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, double>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, double>;
template <typename T>
__global__ void KernelMaxPool3DWithIdx(
const int nthreads, const T* input_data, T* output_data, T* mask_data,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (ele < input_data[(d * input_height + h) * input_width + w]) {
max_index = (d * input_height + h) * input_width + w;
ele = input_data[max_index];
}
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T>
__global__ void KernelMaxPool3DWithIdxGrad(
const int nthreads, T* input_grad, const T* output_grad, const T* mask,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int d_offset = (index / input_width / input_height) % input_depth;
int c_offset =
(index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pd_start =
(d_offset + padding_depth < ksize_depth)
? 0
: (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
int ph_start =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
int pw_start =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
int pd_end =
min((d_offset + padding_depth) / stride_depth + 1, output_depth);
int ph_end =
min((h_offset + padding_height) / stride_height + 1, output_height);
int pw_end =
min((w_offset + padding_width) / stride_width + 1, output_width);
T gradient = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
mask += output_idx;
output_grad += output_idx;
for (int pd = pd_start; pd < pd_end; ++pd) {
for (int ph = ph_start; ph < ph_end; ++ph) {
for (int pw = pw_start; pw < pw_end; ++pw) {
if (mask[(pd * output_height + ph) * output_width + pw] ==
input_current_feature_map_idx)
gradient +=
output_grad[(pd * output_height + ph) * output_width + pw];
}
}
}
input_grad[index] = gradient;
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor& output,
framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output.mutable_data<T>(context.GetPlace());
T* mask_data = mask.mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdx<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, mask_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
framework::Tensor& input_grad,
const framework::Tensor& output_grad,
const framework::Tensor& mask, std::vector<int>& ksize,
std::vector<int>& strides, std::vector<int>& paddings) {
const int batch_size = input_grad.dims()[0];
const int input_channels = input_grad.dims()[1];
const int input_depth = input_grad.dims()[2];
const int input_height = input_grad.dims()[3];
const int input_width = input_grad.dims()[4];
const int output_depth = output_grad.dims()[2];
const int output_height = output_grad.dims()[3];
const int output_width = output_grad.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* output_grad_data = output_grad.data<T>();
const T* mask_data = mask.data<T>();
T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdxGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_grad_data, output_grad_data, mask_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width);
}
};
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, float>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, float>;
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, double>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
42e422ed3e31e1f8ed0463974e27f0d2ea2b883a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <hip/hip_cooperative_groups.h>
#define P(i, j) ((i) * nx + (j))
void allocate_2d(float *&a, int nx, int ny){
hipMallocManaged(&a, nx*ny*sizeof(float));
}
__global__ void build_up_b(float *b, float rho, float dt, float *u, float *v, float dx, float dy, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
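// b is the source term of the pressure Poisson equation,
// rho * ( (1/dt)*(du/dx + dv/dy) - (du/dx)^2 - 2*(du/dy)*(dv/dx) - (dv/dy)^2 ),
// discretized below with central differences.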
b[P(i, j)] = (
rho * (
1 / dt * (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
) + (
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
) - (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
)
) * (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
)
) - 2 * (
(
u[P(i+1, j)] - u[P(i-1, j)]
) / (
2 * dy
) * (
v[P(i, j+1)] - v[P(i, j-1)]
) / (
2 * dx
)
) - (
(
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
) * (
(
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
)
)
);
}
__global__ void copy(float *a, float *b, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 0 || i >= ny || j < 0 || j >= nx){
return;
}
b[P(i, j)] = a[P(i, j)];
}
__global__ void pressure_poisson_step(float *p, float *pn, float dx, float dy, float *b, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
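// One Jacobi sweep: p becomes the dx^2/dy^2-weighted average of its four
// neighbours in pn minus the scaled source term b.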
p[P(i, j)] = (
(
(
pn[P(i, j+1)] + pn[P(i, j-1)]
) * dy * dy + (
pn[P(i+1, j)] + pn[P(i-1, j)]
) * dx * dx
) / (
2 * (
dx * dx + dy * dy
)
) - dx * dx * dy * dy / (
2 * (
dx * dx + dy * dy
)
) * b[P(i, j)]
);
}
void pressure_poisson(float *p, float *pn, float dx, float dy, float *b, int nit, int nx, int ny){
for(int i = 0;i < nit;i++){
dim3 grid = dim3(2, 2);
dim3 block = dim3(32, 32);
hipLaunchKernelGGL(( copy), dim3(grid),dim3(block), 0, 0, p, pn, nx, ny);
hipLaunchKernelGGL(( pressure_poisson_step), dim3(grid),dim3(block), 0, 0, p, pn, dx, dy, b, nx, ny);
hipDeviceSynchronize();
for(int i = 0;i < ny;i++){
p[P(i, nx-1)] = p[P(i, nx-2)];
}
for(int i = 0;i < nx;i++){
p[P(0, i)] = p[P(1, i)];
}
for(int i = 0;i < ny;i++){
p[P(i, 0)] = p[P(i, 1)];
}
for(int i = 0;i < nx;i++){
p[P(ny-1, i)] = 0;
}
}
}
__global__ void update_u(float *u, float *v, float *un, float *vn, float dt, float dx, float dy, float rho, float nu, float *p, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
u[P(i, j)] = (
un[P(i, j)] - un[P(i, j)] * dt / dx * (
un[P(i, j)] - un[P(i, j-1)]
) - vn[P(i, j)] * dt / dy * (
un[P(i, j)] - un[P(i-1, j)]
) - dt / (
2 * rho * dx
) * (
p[P(i, j+1)] - p[P(i, j-1)]
) + nu * (
dt / (dx * dx) * (
un[P(i, j+1)] - 2 * un[P(i, j)] + un[P(i, j-1)]
) + dt / (dy * dy) * (
un[P(i+1, j)] - 2 * un[P(i, j)] + un[P(i-1, j)]
)
)
);
}
__global__ void update_v(float *u, float *v, float *un, float *vn, float dt, float dx, float dy, float rho, float nu, float *p, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
v[P(i, j)] = (
vn[P(i, j)] - un[P(i, j)] * dt / dx * (
vn[P(i, j)] - vn[P(i, j-1)]
) - vn[P(i, j)] * dt / dy * (
vn[P(i, j)] - vn[P(i-1, j)]
) - dt / (
2 * rho * dy
) * (
p[P(i+1, j)] - p[P(i-1, j)]
) + nu * (
dt / (dx * dx) * (
vn[P(i, j+1)] - 2 * vn[P(i, j)] + vn[P(i, j-1)]
) + dt / (dy * dy) * (
vn[P(i+1, j)] - 2 * vn[P(i, j)] + vn[P(i-1, j)]
)
)
);
}
void boarder(float *u, float *v, int nx, int ny){
for(int i = 0;i < nx;i++){
u[P(0, i)] = 0;
}
for(int i = 0;i < ny;i++){
u[P(i, 0)] = 0;
}
for(int i = 0;i < ny;i++){
u[P(i, nx-1)] = 0;
}
for(int i = 0;i < nx;i++){
u[P(ny-1, i)] = 1;
}
for(int i = 0;i < nx;i++){
v[P(0, i)] = 0;
}
for(int i = 0;i < nx;i++){
v[P(ny-1, i)] = 0;
}
for(int i = 0;i < ny;i++){
v[P(i, 0)] = 0;
}
for(int i = 0;i < ny;i++){
v[P(i, nx-1)] = 0;
}
}
void print2d(float *a, int nx, int ny){
for(int i = 0;i < ny;i++){
for(int j = 0;j < nx;j++){
printf("%g\n", a[P(i, j)]);
}
}
}
void cavity_flow_step(float *u, float *un, float *v, float *vn, float dt, float dx, float dy, float *p, float *pn, float *b, float rho, float nu, int nx, int ny, int nit){
dim3 grid = dim3(2, 2);
dim3 block = dim3(32, 32);
hipLaunchKernelGGL(( build_up_b), dim3(grid),dim3(block), 0, 0, b, rho, dt, u, v, dx, dy, nx, ny);
hipDeviceSynchronize();
pressure_poisson(p, pn, dx, dy, b, nit, nx, ny);
hipDeviceSynchronize();
hipLaunchKernelGGL(( copy), dim3(grid),dim3(block), 0, 0, u, un, nx, ny);
hipLaunchKernelGGL(( copy), dim3(grid),dim3(block), 0, 0, v, vn, nx, ny);
hipLaunchKernelGGL(( update_u), dim3(grid),dim3(block), 0, 0, u, v, un, vn, dt, dx, dy, rho, nu, p, nx, ny);
hipLaunchKernelGGL(( update_v), dim3(grid),dim3(block), 0, 0, u, v, un, vn, dt, dx, dy, rho, nu, p, nx, ny);
hipDeviceSynchronize();
boarder(u, v, nx, ny);
hipDeviceSynchronize();
}
void cavity_flow(int nt, float *u, float *un, float *v, float *vn, float dt, float dx, float dy, float *p, float *pn, float *b, float rho, float nu, int nx, int ny, int nit){
for(int i = 0;i < nt;i++){
cavity_flow_step(u, un, v, vn, dt, dx, dy, p, pn, b, rho, nu, nx, ny, nit);
}
}
int main(){
int nx = 41;
int ny = 41;
int nt = 700;
int nit = 50;
float cx = 2;
float cy = 2;
float dx = cx / (nx - 1);
float dy = cy / (ny - 1);
float rho = 1;
float nu = 0.1;
float dt = 0.001;
float *u;
allocate_2d(u, nx, ny);
float *v;
allocate_2d(v, nx, ny);
float *p;
allocate_2d(p, nx, ny);
float *b;
allocate_2d(b, nx, ny);
float *un;
allocate_2d(un, nx, ny);
float *vn;
allocate_2d(vn, nx, ny);
float *pn;
allocate_2d(pn, nx, ny);
for(int i = 0;i < ny;i++){
for(int j = 0;j < nx;j++){
u[P(i, j)] = v[P(i, j)] = p[P(i, j)] = b[P(i, j)] = 0;
}
}
cavity_flow(nt, u, un, v, vn, dt, dx, dy, p, pn, b, rho, nu, nx, ny, nit);
hipDeviceSynchronize();
print2d(u, nx, ny);
print2d(v, nx, ny);
print2d(p, nx, ny);
}
| 42e422ed3e31e1f8ed0463974e27f0d2ea2b883a.cu | #include <cstdlib>
#include <cstdio>
#include <cooperative_groups.h>
#define P(i, j) ((i) * nx + (j))
void allocate_2d(float *&a, int nx, int ny){
cudaMallocManaged(&a, nx*ny*sizeof(float));
}
__global__ void build_up_b(float *b, float rho, float dt, float *u, float *v, float dx, float dy, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
b[P(i, j)] = (
rho * (
1 / dt * (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
) + (
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
) - (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
)
) * (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
)
) - 2 * (
(
u[P(i+1, j)] - u[P(i-1, j)]
) / (
2 * dy
) * (
v[P(i, j+1)] - v[P(i, j-1)]
) / (
2 * dx
)
) - (
(
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
) * (
(
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
)
)
);
}
__global__ void copy(float *a, float *b, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 0 || i >= ny || j < 0 || j >= nx){
return;
}
b[P(i, j)] = a[P(i, j)];
}
__global__ void pressure_poisson_step(float *p, float *pn, float dx, float dy, float *b, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
p[P(i, j)] = (
(
(
pn[P(i, j+1)] + pn[P(i, j-1)]
) * dy * dy + (
pn[P(i+1, j)] + pn[P(i-1, j)]
) * dx * dx
) / (
2 * (
dx * dx + dy * dy
)
) - dx * dx * dy * dy / (
2 * (
dx * dx + dy * dy
)
) * b[P(i, j)]
);
}
void pressure_poisson(float *p, float *pn, float dx, float dy, float *b, int nit, int nx, int ny){
for(int i = 0;i < nit;i++){
dim3 grid = dim3(2, 2);
dim3 block = dim3(32, 32);
copy<<<grid,block>>>(p, pn, nx, ny);
pressure_poisson_step<<<grid,block>>>(p, pn, dx, dy, b, nx, ny);
cudaDeviceSynchronize();
for(int i = 0;i < ny;i++){
p[P(i, nx-1)] = p[P(i, nx-2)];
}
for(int i = 0;i < nx;i++){
p[P(0, i)] = p[P(1, i)];
}
for(int i = 0;i < ny;i++){
p[P(i, 0)] = p[P(i, 1)];
}
for(int i = 0;i < nx;i++){
p[P(ny-1, i)] = 0;
}
}
}
__global__ void update_u(float *u, float *v, float *un, float *vn, float dt, float dx, float dy, float rho, float nu, float *p, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
u[P(i, j)] = (
un[P(i, j)] - un[P(i, j)] * dt / dx * (
un[P(i, j)] - un[P(i, j-1)]
) - vn[P(i, j)] * dt / dy * (
un[P(i, j)] - un[P(i-1, j)]
) - dt / (
2 * rho * dx
) * (
p[P(i, j+1)] - p[P(i, j-1)]
) + nu * (
dt / (dx * dx) * (
un[P(i, j+1)] - 2 * un[P(i, j)] + un[P(i, j-1)]
) + dt / (dy * dy) * (
un[P(i+1, j)] - 2 * un[P(i, j)] + un[P(i-1, j)]
)
)
);
}
__global__ void update_v(float *u, float *v, float *un, float *vn, float dt, float dx, float dy, float rho, float nu, float *p, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
v[P(i, j)] = (
vn[P(i, j)] - un[P(i, j)] * dt / dx * (
vn[P(i, j)] - vn[P(i, j-1)]
) - vn[P(i, j)] * dt / dy * (
vn[P(i, j)] - vn[P(i-1, j)]
) - dt / (
2 * rho * dy
) * (
p[P(i+1, j)] - p[P(i-1, j)]
) + nu * (
dt / (dx * dx) * (
vn[P(i, j+1)] - 2 * vn[P(i, j)] + vn[P(i, j-1)]
) + dt / (dy * dy) * (
vn[P(i+1, j)] - 2 * vn[P(i, j)] + vn[P(i-1, j)]
)
)
);
}
void boarder(float *u, float *v, int nx, int ny){
for(int i = 0;i < nx;i++){
u[P(0, i)] = 0;
}
for(int i = 0;i < ny;i++){
u[P(i, 0)] = 0;
}
for(int i = 0;i < ny;i++){
u[P(i, nx-1)] = 0;
}
for(int i = 0;i < nx;i++){
u[P(ny-1, i)] = 1;
}
for(int i = 0;i < nx;i++){
v[P(0, i)] = 0;
}
for(int i = 0;i < nx;i++){
v[P(ny-1, i)] = 0;
}
for(int i = 0;i < ny;i++){
v[P(i, 0)] = 0;
}
for(int i = 0;i < ny;i++){
v[P(i, nx-1)] = 0;
}
}
void print2d(float *a, int nx, int ny){
for(int i = 0;i < ny;i++){
for(int j = 0;j < nx;j++){
printf("%g\n", a[P(i, j)]);
}
}
}
void cavity_flow_step(float *u, float *un, float *v, float *vn, float dt, float dx, float dy, float *p, float *pn, float *b, float rho, float nu, int nx, int ny, int nit){
dim3 grid = dim3(2, 2);
dim3 block = dim3(32, 32);
build_up_b<<<grid,block>>>(b, rho, dt, u, v, dx, dy, nx, ny);
cudaDeviceSynchronize();
pressure_poisson(p, pn, dx, dy, b, nit, nx, ny);
cudaDeviceSynchronize();
copy<<<grid,block>>>(u, un, nx, ny);
copy<<<grid,block>>>(v, vn, nx, ny);
update_u<<<grid,block>>>(u, v, un, vn, dt, dx, dy, rho, nu, p, nx, ny);
update_v<<<grid,block>>>(u, v, un, vn, dt, dx, dy, rho, nu, p, nx, ny);
cudaDeviceSynchronize();
boarder(u, v, nx, ny);
cudaDeviceSynchronize();
}
void cavity_flow(int nt, float *u, float *un, float *v, float *vn, float dt, float dx, float dy, float *p, float *pn, float *b, float rho, float nu, int nx, int ny, int nit){
for(int i = 0;i < nt;i++){
cavity_flow_step(u, un, v, vn, dt, dx, dy, p, pn, b, rho, nu, nx, ny, nit);
}
}
int main(){
int nx = 41;
int ny = 41;
int nt = 700;
int nit = 50;
float cx = 2;
float cy = 2;
float dx = cx / (nx - 1);
float dy = cy / (ny - 1);
float rho = 1;
float nu = 0.1;
float dt = 0.001;
float *u;
allocate_2d(u, nx, ny);
float *v;
allocate_2d(v, nx, ny);
float *p;
allocate_2d(p, nx, ny);
float *b;
allocate_2d(b, nx, ny);
float *un;
allocate_2d(un, nx, ny);
float *vn;
allocate_2d(vn, nx, ny);
float *pn;
allocate_2d(pn, nx, ny);
for(int i = 0;i < ny;i++){
for(int j = 0;j < nx;j++){
u[P(i, j)] = v[P(i, j)] = p[P(i, j)] = b[P(i, j)] = 0;
}
}
cavity_flow(nt, u, un, v, vn, dt, dx, dy, p, pn, b, rho, nu, nx, ny, nit);
cudaDeviceSynchronize();
print2d(u, nx, ny);
print2d(v, nx, ny);
print2d(p, nx, ny);
}
|
074da87ef89dc3c9bd766feacd9d769b9730c2c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is a simple MPI+CUDA program that does the following:
//
// 1. On the each host, fill an array with consecutive numbers, starting at 0
// for the first host, incrementing up to some amount, and then incrementing
// from there up across each host.
// 2. Copy the array from the each host to its device.
// 3. On the device, multiply each number in the array by 2.
// 4. Copy the array from the device to the host.
// 5. Gather the arrays from each host onto a single host.
// 6. On that host, print the result.
//
// Author: Aaron Weeden, Shodor, 2016
// Import library so we can call printf()
#include <stdio.h>
// Import library so we can call exit() and use EXIT_FAILURE, as well as call
// malloc() and free()
#include <stdlib.h>
// Import library with model parameters
#include "params.h"
// Define the number of CUDA threads in each CUDA warp (group of threads that
// execute instructions in lock-step)
#define THREADS_PER_WARP 32
// Define the maximum number of CUDA warps in each CUDA block
#define MAX_WARPS_PER_BLOCK 16
// Define the number of CUDA threads in each CUDA block
#define THREADS_PER_BLOCK ((THREADS_PER_WARP) * (MAX_WARPS_PER_BLOCK))
// Define the number of CUDA blocks in each CUDA grid
#define BLOCKS_PER_GRID 1
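// Note: with BLOCKS_PER_GRID = 1 the MultBy2 launch covers at most
// THREADS_PER_BLOCK = 32 * 16 = 512 elements (there is no grid-stride loop),
// so NUM_COUNT from params.h is assumed not to exceed 512.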
// Declare variable for device memory
extern int * HostNums;
int * DeviceNums;
// Declare functions that will be defined later
void TryCuda(hipError_t const err);
__global__ void MultBy2(int * const DeviceNums, int const count);
extern "C"
{
void AllocateDeviceMemory()
{
// Allocate memory for the device array
TryCuda(hipMalloc((void**)&DeviceNums, BYTE_COUNT));
}
void MultBy2OnDevice()
{
// Copy the array from each host to its device
TryCuda(hipMemcpy(DeviceNums, HostNums, BYTE_COUNT,
hipMemcpyHostToDevice));
// On the device, multiply each number in the array by 2
hipLaunchKernelGGL(( MultBy2), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, DeviceNums, NUM_COUNT);
// Copy the array from the device to the host
TryCuda(hipMemcpy(HostNums, DeviceNums, BYTE_COUNT,
hipMemcpyDeviceToHost));
}
void FreeDeviceMemory()
{
// De-allocate memory for the device array
TryCuda(hipFree(DeviceNums));
}
}
// Define a function to check whether a CUDA call was successful
void TryCuda(hipError_t const err)
{
if (err != hipSuccess)
{
fprintf(stderr, "CUDA Error: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Define a function which will be executed on a CUDA device
__global__ void MultBy2(int * const DeviceNums, int const count)
{
// Calculate the unique ID for the current CUDA thread
int const threadId = blockIdx.x * blockDim.x + threadIdx.x;
// All threads whose thread ID is >= count will NOT do the following, thus
// avoiding writing into un-allocated space.
if (threadId < count)
{
// The current thread indexes the device array using its thread ID and
// multiplies that element by 2.
DeviceNums[threadId] *= 2;
}
}
| 074da87ef89dc3c9bd766feacd9d769b9730c2c7.cu | // This is a simple MPI+CUDA program that does the following:
//
// 1. On the each host, fill an array with consecutive numbers, starting at 0
// for the first host, incrementing up to some amount, and then incrementing
// from there up across each host.
// 2. Copy the array from the each host to its device.
// 3. On the device, multiply each number in the array by 2.
// 4. Copy the array from the device to the host.
// 5. Gather the arrays from each host onto a single host.
// 6. On that host, print the result.
//
// Author: Aaron Weeden, Shodor, 2016
// Import library so we can call printf()
#include <stdio.h>
// Import library so we can call exit() and use EXIT_FAILURE, as well as call
// malloc() and free()
#include <stdlib.h>
// Import library with model parameters
#include "params.h"
// Define the number of CUDA threads in each CUDA warp (group of threads that
// execute instructions in lock-step)
#define THREADS_PER_WARP 32
// Define the maximum number of CUDA warps in each CUDA block
#define MAX_WARPS_PER_BLOCK 16
// Define the number of CUDA threads in each CUDA block
#define THREADS_PER_BLOCK ((THREADS_PER_WARP) * (MAX_WARPS_PER_BLOCK))
// Define the number of CUDA blocks in each CUDA grid
#define BLOCKS_PER_GRID 1
// Declare variable for device memory
extern int * HostNums;
int * DeviceNums;
// Declare functions that will be defined later
void TryCuda(cudaError_t const err);
__global__ void MultBy2(int * const DeviceNums, int const count);
extern "C"
{
void AllocateDeviceMemory()
{
// Allocate memory for the device array
TryCuda(cudaMalloc((void**)&DeviceNums, BYTE_COUNT));
}
void MultBy2OnDevice()
{
// Copy the array from each host to its device
TryCuda(cudaMemcpy(DeviceNums, HostNums, BYTE_COUNT,
cudaMemcpyHostToDevice));
// On the device, multiply each number in the array by 2
MultBy2<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(DeviceNums, NUM_COUNT);
// Copy the array from the device to the host
TryCuda(cudaMemcpy(HostNums, DeviceNums, BYTE_COUNT,
cudaMemcpyDeviceToHost));
}
void FreeDeviceMemory()
{
// De-allocate memory for the device array
TryCuda(cudaFree(DeviceNums));
}
}
// Define a function to check whether a CUDA call was successful
void TryCuda(cudaError_t const err)
{
if (err != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Define a function which will be executed on a CUDA device
__global__ void MultBy2(int * const DeviceNums, int const count)
{
// Calculate the unique ID for the current CUDA thread
int const threadId = blockIdx.x * blockDim.x + threadIdx.x;
// All threads whose thread ID is >= count will NOT do the following, thus
// avoiding writing into un-allocated space.
if (threadId < count)
{
// The current thread indexes the device array using its thread ID and
// multiplies that element by 2.
DeviceNums[threadId] *= 2;
}
}
|
9d46c6b784b73c767365e43a51c43fcb5dcaef7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include<stdio.h>
__device__ float devData;
__global__ void checkGlobalVariable(){
printf("Device: the value of the global variable is %f\n", devData);
devData += 2.0f;
}
int main(){
float value = 3.14f;
// hipMemcpyToSymbol(devData, &value, sizeof(float));
float *dptr = NULL;
hipGetSymbolAddress((void**)&dptr, devData); // devData is a symbol, not an address; this call obtains its global (device) address
hipMemcpy(dptr, &value, sizeof(float), hipMemcpyHostToDevice);
printf("Host: copied %f to the global variable\n", value);
hipLaunchKernelGGL(( checkGlobalVariable), dim3(1), dim3(1), 0, 0, );
hipMemcpyFromSymbol(&value, devData, sizeof(float));
printf("Host: the value changed by the kernel to %f\n", value);
hipDeviceReset();
return EXIT_SUCCESS;
} | 9d46c6b784b73c767365e43a51c43fcb5dcaef7b.cu | #include<cuda_runtime.h>
#include<stdio.h>
__device__ float devData;
__global__ void checkGlobalVariable(){
printf("Device: the value of the global variable is %f\n", devData);
devData += 2.0f;
}
int main(){
float value = 3.14f;
// cudaMemcpyToSymbol(devData, &value, sizeof(float));
float *dptr = NULL;
cudaGetSymbolAddress((void**)&dptr, devData); // devData is a symbol, not an address; this call obtains its global (device) address
cudaMemcpy(dptr, &value, sizeof(float), cudaMemcpyHostToDevice);
printf("Host: copied %f to the global variable\n", value);
checkGlobalVariable<<<1, 1>>>();
cudaMemcpyFromSymbol(&value, devData, sizeof(float));
printf("Host: the value changed by the kernel to %f\n", value);
cudaDeviceReset();
return EXIT_SUCCESS;
} |
321ccd49bdf7fea01d3aa7614f21784be2082fc5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include "utils/utils.h"
#define BUF_2M (2 * 1024 * 1024)
#define BUF_32M (32 * 1024 * 1024)
int main(void) {
hipSetDevice(0);
int *host_array_a = 0;
int *host_array_b = 0;
int *device_array_a = 0;
int *device_array_b = 0;
int *device_array_c = 0;
int num_bytes = BUF_32M * sizeof(int);
// TODO 1: Allocate the host's arrays with the specified number of elements:
// host_array_a => 32M
// host_array_b => 32M
host_array_a = (int *)malloc(num_bytes);
host_array_b = (int *)malloc(num_bytes);
// TODO 2: Allocate the device's arrays with the specified number of elements:
// device_array_a => 32M
// device_array_b => 32M
// device_array_c => 2M
hipMalloc((void **) &device_array_a, num_bytes);
hipMalloc((void **) &device_array_b, num_bytes);
hipMalloc((void **) &device_array_c, BUF_2M * sizeof(int)); // temporary swap buffer only needs 2M elements
// Check for allocation errors
if (host_array_a == 0 || host_array_b == 0 ||
device_array_a == 0 || device_array_b == 0 ||
device_array_c == 0) {
printf("[*] Error!\n");
return 1;
}
for (int i = 0; i < BUF_32M; ++i) {
host_array_a[i] = i % 32;
host_array_b[i] = i % 2;
}
printf("Before swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 3: Copy from host to device
hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(device_array_b, host_array_b, num_bytes, hipMemcpyHostToDevice);
// TODO 4: Swap the buffers (BUF_2M values each iteration)
// Hint 1: device_array_c should be used as a temporary buffer
// Hint 2: hipMemcpy
for (int i = 0; i < BUF_32M; i += BUF_2M) {
hipMemcpy(device_array_c, device_array_a + i, BUF_2M * sizeof(int), hipMemcpyDeviceToDevice);
hipMemcpy(device_array_a + i, device_array_b + i, BUF_2M * sizeof(int), hipMemcpyDeviceToDevice);
hipMemcpy(device_array_b + i, device_array_c, BUF_2M * sizeof(int), hipMemcpyDeviceToDevice);
}
// TODO 5: Copy from device to host
hipMemcpy(host_array_a, device_array_a, num_bytes, hipMemcpyDeviceToHost);
hipMemcpy(host_array_b, device_array_b, num_bytes, hipMemcpyDeviceToHost);
printf("\nAfter swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 6: Free the memory
free(host_array_a);
free(host_array_b);
hipFree(device_array_a);
hipFree(device_array_b);
hipFree(device_array_c);
return 0;
}
| 321ccd49bdf7fea01d3aa7614f21784be2082fc5.cu | #include <stdio.h>
#include <math.h>
#include "utils/utils.h"
#define BUF_2M (2 * 1024 * 1024)
#define BUF_32M (32 * 1024 * 1024)
int main(void) {
cudaSetDevice(0);
int *host_array_a = 0;
int *host_array_b = 0;
int *device_array_a = 0;
int *device_array_b = 0;
int *device_array_c = 0;
int num_bytes = BUF_32M * sizeof(int);
// TODO 1: Allocate the host's arrays with the specified number of elements:
// host_array_a => 32M
// host_array_b => 32M
host_array_a = (int *)malloc(num_bytes);
host_array_b = (int *)malloc(num_bytes);
// TODO 2: Allocate the device's arrays with the specified number of elements:
// device_array_a => 32M
// device_array_b => 32M
// device_array_c => 2M
cudaMalloc((void **) &device_array_a, num_bytes);
cudaMalloc((void **) &device_array_b, num_bytes);
cudaMalloc((void **) &device_array_c, BUF_2M * sizeof(int)); // temporary swap buffer only needs 2M elements
// Check for allocation errors
if (host_array_a == 0 || host_array_b == 0 ||
device_array_a == 0 || device_array_b == 0 ||
device_array_c == 0) {
printf("[*] Error!\n");
return 1;
}
for (int i = 0; i < BUF_32M; ++i) {
host_array_a[i] = i % 32;
host_array_b[i] = i % 2;
}
printf("Before swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 3: Copy from host to device
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice);
// TODO 4: Swap the buffers (BUF_2M values each iteration)
// Hint 1: device_array_c should be used as a temporary buffer
// Hint 2: cudaMemcpy
for (int i = 0; i < BUF_32M; i += BUF_2M) {
cudaMemcpy(device_array_c, device_array_a + i, BUF_2M * sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(device_array_a + i, device_array_b + i, BUF_2M * sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(device_array_b + i, device_array_c, BUF_2M * sizeof(int), cudaMemcpyDeviceToDevice);
}
// TODO 5: Copy from device to host
cudaMemcpy(host_array_a, device_array_a, num_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(host_array_b, device_array_b, num_bytes, cudaMemcpyDeviceToHost);
printf("\nAfter swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 6: Free the memory
free(host_array_a);
free(host_array_b);
cudaFree(device_array_a);
cudaFree(device_array_b);
cudaFree(device_array_c);
return 0;
}
|
a1f369afc342c15465ecfb10f368d928e8f3d0ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define zgemv_bs 32
extern __shared__ magmaDoubleComplex shared_data[];
__global__ void
kernel_zgemvn_batched(
int m, int n, magmaDoubleComplex alpha,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy)
{
magmaDoubleComplex *A = dA_array[blockIdx.x];
magmaDoubleComplex *x = x_array[blockIdx.x];
magmaDoubleComplex *y = y_array[blockIdx.x];
int tx = threadIdx.x;
magmaDoubleComplex res = MAGMA_Z_ZERO;
magmaDoubleComplex *buff = (magmaDoubleComplex*)shared_data;
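// One thread block per batch entry: x is staged in shared memory, then
// thread tx accumulates row tx of A*x and updates y[tx].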
if(tx < n)
{
buff[tx] = x[tx*incx];
}
__syncthreads();
if(tx < m )
{
for(int j=0; j < n ; j++)
{
res += A[tx]*buff[j];
A += lda;
}
y[tx*incy] = alpha * res + y[tx*incy] * beta;
}
}
/*
Matrix Non-transpose Vector Multiplication
y := alpha*A*x + beta*y,
*/
extern "C"
void magmablas_zgemvn_batched(
int m, int n,
magmaDoubleComplex alpha, magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy,
int batchCount)
{
if( m > 512 || n > 512)
{
fprintf( stderr, "m=%d, n=%d, zgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_zgemv instead", m, n, 512);
return ;
}
dim3 grid(batchCount, 1, 1);
dim3 threads(max(m,n), 1, 1);
hipLaunchKernelGGL(( kernel_zgemvn_batched), dim3(grid), dim3(threads), n * sizeof(magmaDoubleComplex) , 0, m, n, alpha, dA_array, lda, x_array, incx,
beta, y_array, incy);
}
__global__ void
kernel_zgemvt_batched(
int m, int n, int m1, magmaDoubleComplex alpha,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy)
{
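    // One thread block per (batch, column) pair: blockIdx.x selects the problem and
    // blockIdx.y the column of A. The zgemv_bs threads stride down that column,
    // accumulate partial dot products with x, reduce them in shared memory, and
    // thread 0 writes the result to y[blockIdx.y].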
magmaDoubleComplex *A_ptr = dA_array[blockIdx.x];
magmaDoubleComplex *x_ptr = x_array[blockIdx.x];
magmaDoubleComplex *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ magmaDoubleComplex sdata[zgemv_bs];
for(int i=0; i<m1; i+= zgemv_bs)
{
res += A_ptr[i] * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += A_ptr[m1] * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(zgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Transpose Vector Multiplication
y := alpha* A**T *x + beta*y,
*/
extern "C"
void magmablas_zgemvt_batched(
int m, int n,
magmaDoubleComplex alpha, magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(zgemv_bs, 1, 1);
int m1 = (m / zgemv_bs) * zgemv_bs;
hipLaunchKernelGGL(( kernel_zgemvt_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#if defined(PRECISION_z) || defined (PRECISION_c)
__global__ void
kernel_zgemvc_batched(
int m, int n, int m1, magmaDoubleComplex alpha,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy)
{
magmaDoubleComplex *A_ptr = dA_array[blockIdx.x];
magmaDoubleComplex *x_ptr = x_array[blockIdx.x];
magmaDoubleComplex *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ magmaDoubleComplex sdata[zgemv_bs];
for(int i=0; i<m1; i+= zgemv_bs)
{
res += MAGMA_Z_CNJG (A_ptr[i]) * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += MAGMA_Z_CNJG(A_ptr[m1]) * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(zgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Conjugate Transpose Vector Multiplication
y := alpha* A**H *x + beta*y,
*/
extern "C"
void magmablas_zgemvc_batched(
int m, int n,
magmaDoubleComplex alpha, magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(zgemv_bs, 1, 1);
int m1 = (m / zgemv_bs) * zgemv_bs;
hipLaunchKernelGGL(( kernel_zgemvc_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#endif // defined(PRECISION_z) || defined (PRECISION_c)
/**
Purpose
-------
This routine computes Y = alpha op(A) x + beta y, on the GPU, where
A = dA_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1].
This is a batched version.
@param[in]
trans CHARACTER*1.
On entry, TRANS specifies the form of op( A ) to be used in
the matrix multiplication as follows:
= 'N': op( A ) = A.
= 'T': op( A ) = A**T.
= 'C': op( A ) = A**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix op(A).
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix op(A).
@param[in]
alpha COMPLEX*16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array A = dA_array[i]
A: COMPLEX*16 array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in]
x_array x = x_array[i]
x: COMPLEX*16 array of dimension.
n if trans == MagmaNoTrans.
m if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incx INTEGER.
incx specifies the increment for the elements of x.
incx must not be zero.
@param[in]
beta COMPLEX*16.
On entry, BETA specifies the scalar beta.
@param[out]
y_array y = y_array[i]:
On exit y = alpha op(A) x + beta y.
y: COMPLEX*16 array of dimension.
m if trans == MagmaNoTrans.
n if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incy INTEGER.
incy specifies the increment for the elements of y.
incy must not be zero.
@param[in]
batchCount INTEGER
number of pointers contained in dA_array, x_array and y_array.
@ingroup magma_zblas2
******************************************************************* */
extern "C"
void magmablas_zgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dA_array[], magma_int_t ldda,
magmaDoubleComplex_ptr dx_array[], magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if(m==0 || n ==0 ) return;
if ( trans == MagmaNoTrans ) {
magmablas_zgemvn_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaTrans ) {
magmablas_zgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaConjTrans ) {
#if defined(PRECISION_z) || defined (PRECISION_c)
magmablas_zgemvc_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#else
magmablas_zgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#endif
}
else {
fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) );
}
}
#undef zgemv_bs
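/*
    Illustrative call sketch (not part of MAGMA): shows how the batched interface
    above is typically invoked. The device pointer arrays dA_array, dx_array and
    dy_array are assumed to have been filled beforehand with batchCount device
    pointers each; m, n, ldda and batchCount are placeholders.

        magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );
        magmaDoubleComplex beta  = MAGMA_Z_MAKE( 0.0, 0.0 );
        // dA_array[i]: m-by-n matrix, dx_array[i]: length-n vector,
        // dy_array[i]: length-m vector, for i in [0, batchCount)
        magmablas_zgemv_batched( MagmaNoTrans, m, n, alpha,
                                 dA_array, ldda,
                                 dx_array, 1,
                                 beta,
                                 dy_array, 1,
                                 batchCount );
*/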
| a1f369afc342c15465ecfb10f368d928e8f3d0ff.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define zgemv_bs 32
extern __shared__ magmaDoubleComplex shared_data[];
__global__ void
kernel_zgemvn_batched(
int m, int n, magmaDoubleComplex alpha,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy)
{
magmaDoubleComplex *A = dA_array[blockIdx.x];
magmaDoubleComplex *x = x_array[blockIdx.x];
magmaDoubleComplex *y = y_array[blockIdx.x];
int tx = threadIdx.x;
magmaDoubleComplex res = MAGMA_Z_ZERO;
magmaDoubleComplex *buff = (magmaDoubleComplex*)shared_data;
if(tx < n)
{
buff[tx] = x[tx*incx];
}
__syncthreads();
if(tx < m )
{
for(int j=0; j < n ; j++)
{
res += A[tx]*buff[j];
A += lda;
}
y[tx*incy] = alpha * res + y[tx*incy] * beta;
}
}
/*
Matrix Non-transpose Vector Multiplication
y := alpha*A*x + beta*y,
*/
extern "C"
void magmablas_zgemvn_batched(
int m, int n,
magmaDoubleComplex alpha, magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy,
int batchCount)
{
if( m > 512 || n > 512)
{
fprintf( stderr, "m=%d, n=%d, zgemv_batched non-transpose assumes row and column counts of at most %d. Please call magmablas_zgemv instead", m, n, 512);
return ;
}
dim3 grid(batchCount, 1, 1);
dim3 threads(max(m,n), 1, 1);
kernel_zgemvn_batched<<< grid, threads, n * sizeof(magmaDoubleComplex) >>>( m, n, alpha, dA_array, lda, x_array, incx,
beta, y_array, incy);
}
__global__ void
kernel_zgemvt_batched(
int m, int n, int m1, magmaDoubleComplex alpha,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy)
{
magmaDoubleComplex *A_ptr = dA_array[blockIdx.x];
magmaDoubleComplex *x_ptr = x_array[blockIdx.x];
magmaDoubleComplex *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ magmaDoubleComplex sdata[zgemv_bs];
for(int i=0; i<m1; i+= zgemv_bs)
{
res += A_ptr[i] * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += A_ptr[m1] * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(zgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Transpose Vector Multiplication
y := alpha* A**T *x + beta*y,
*/
extern "C"
void magmablas_zgemvt_batched(
int m, int n,
magmaDoubleComplex alpha, magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(zgemv_bs, 1, 1);
int m1 = (m / zgemv_bs) * zgemv_bs;
kernel_zgemvt_batched <<< grid, threads >>>(m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#if defined(PRECISION_z) || defined (PRECISION_c)
__global__ void
kernel_zgemvc_batched(
int m, int n, int m1, magmaDoubleComplex alpha,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy)
{
magmaDoubleComplex *A_ptr = dA_array[blockIdx.x];
magmaDoubleComplex *x_ptr = x_array[blockIdx.x];
magmaDoubleComplex *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ magmaDoubleComplex sdata[zgemv_bs];
for(int i=0; i<m1; i+= zgemv_bs)
{
res += MAGMA_Z_CNJG (A_ptr[i]) * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += MAGMA_Z_CNJG(A_ptr[m1]) * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(zgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Conjugate Transpose Vector Multiplication
y := alpha* A**H *x + beta*y,
*/
extern "C"
void magmablas_zgemvc_batched(
int m, int n,
magmaDoubleComplex alpha, magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex **x_array, int incx,
magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(zgemv_bs, 1, 1);
int m1 = (m / zgemv_bs) * zgemv_bs;
kernel_zgemvc_batched <<< grid, threads >>>(m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#endif // defined(PRECISION_z) || defined (PRECISION_c)
/**
Purpose
-------
This routine computes Y = alpha op(A) x + beta y, on the GPU, where
A = dA_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1].
This is a batched version.
@param[in]
trans CHARACTER*1.
On entry, TRANS specifies the form of op( A ) to be used in
the matrix multiplication as follows:
= 'N': op( A ) = A.
= 'T': op( A ) = A**T.
= 'C': op( A ) = A**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix op(A).
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix op(A).
@param[in]
alpha COMPLEX*16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array A = dA_array[i]
A: COMPLEX*16 array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in]
x_array x = x_array[i]
x: COMPLEX*16 array of dimension.
n if trans == MagmaNoTrans.
m if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incx INTEGER.
incx specifies the increment for the elements of x.
incx must not be zero.
@param[in]
beta COMPLEX*16.
On entry, BETA specifies the scalar beta.
@param[out]
y_array y = y_array[i]:
On exit y = alpha op(A) x + beta y.
y: COMPLEX*16 array of dimension.
m if trans == MagmaNoTrans.
n if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incy INTEGER.
incy specifies the increment for the elements of y.
incy must not be zero.
@param[in]
batchCount INTEGER
number of pointers contained in dA_array, x_array and y_array.
@ingroup magma_zblas2
******************************************************************* */
extern "C"
void magmablas_zgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dA_array[], magma_int_t ldda,
magmaDoubleComplex_ptr dx_array[], magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if(m==0 || n ==0 ) return;
if ( trans == MagmaNoTrans ) {
magmablas_zgemvn_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaTrans ) {
magmablas_zgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaConjTrans ) {
#if defined(PRECISION_z) || defined (PRECISION_c)
magmablas_zgemvc_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#else
magmablas_zgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#endif
}
else {
fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) );
}
}
#undef zgemv_bs
|
7792d78797f4c991bb6a129ac8c3c00247e1feaf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* _tt_line_project_ray_gpu.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
#ifndef _TTPROJECTRAY_CU_
#define _TTPROJECTRAY_CU_
// Utilities and System includes
//#include <_tt_project_ray_gpu.h>
#define MAX_EPSILON_ERROR 5.00f
#define THRESHOLD 0.30f
#define MAX(a,b) ((a > b) ? a : b)
//#include <cutil_inline.h>
#include "_reg_blocksize_gpu.h"
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include <driver_functions.h>
//#include <sys/time.h>
#include <_tt_common.h>
inline int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
int set_inViewMatrix(float *invViewMatrix, float_2 detector_scale, float_3 detector_transl, float_3 detector_rotat)
{
memset((void*)invViewMatrix,0,12*sizeof(float));
//rotate
mat_44 *rotation = (mat_44 *)calloc(1,sizeof(mat_44));
create_rotation_matrix44(rotation, detector_rotat.x,detector_rotat.y,detector_rotat.z,0,0,0);
//scale
mat_44 *scale = (mat_44 *)calloc(1,sizeof(mat_44));
scale->m[0][0] =detector_scale.x;
scale->m[1][1] =detector_scale.y;
scale->m[2][2] =1;
//transform
mat_44 *m = (mat_44 *)calloc(1,sizeof(mat_44));
*m = reg_mat_44_mul(rotation,scale);
invViewMatrix[0]=m->m[0][0]; invViewMatrix[1]=m->m[0][1]; invViewMatrix[2] =m->m[0][2];
invViewMatrix[4]=m->m[1][0]; invViewMatrix[5]=m->m[1][1]; invViewMatrix[6] =m->m[1][2];
invViewMatrix[8]=m->m[2][0]; invViewMatrix[9]=m->m[2][1]; invViewMatrix[10]=m->m[2][2];
//translate
invViewMatrix[3] =detector_transl.x;
invViewMatrix[7] =detector_transl.y;
invViewMatrix[11]=detector_transl.z;
//cleanup
free(rotation);
free(scale);
free(m);
return 0;
}
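/*
    Illustrative usage sketch (not part of NiftyRec). The values are placeholders and
    only the .x/.y(.z) members of float_2/float_3 that are used above are relied upon:

        float invViewMatrix[12];
        float_2 scale;  scale.x = 1.0f;  scale.y = 1.0f;
        float_3 transl; transl.x = 0.0f; transl.y = 0.0f; transl.z = 300.0f;
        float_3 rotat;  rotat.x = 0.0f;  rotat.y = 0.0f;  rotat.z = 0.0f;
        set_inViewMatrix(invViewMatrix, scale, transl, rotat);
        // invViewMatrix now holds the 3x4 row-major [rotation*scale | translation] matrix
*/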
#endif
| 7792d78797f4c991bb6a129ac8c3c00247e1feaf.cu | /*
* _tt_line_project_ray_gpu.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
#ifndef _TTPROJECTRAY_CU_
#define _TTPROJECTRAY_CU_
// Utilities and System includes
//#include <_tt_project_ray_gpu.h>
#define MAX_EPSILON_ERROR 5.00f
#define THRESHOLD 0.30f
#define MAX(a,b) ((a > b) ? a : b)
//#include <cutil_inline.h>
#include "_reg_blocksize_gpu.h"
#include <vector_types.h>
#include <vector_functions.h>
#include <driver_functions.h>
//#include <sys/time.h>
#include <_tt_common.h>
inline int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
int set_inViewMatrix(float *invViewMatrix, float_2 detector_scale, float_3 detector_transl, float_3 detector_rotat)
{
memset((void*)invViewMatrix,0,12*sizeof(float));
//rotate
mat_44 *rotation = (mat_44 *)calloc(1,sizeof(mat_44));
create_rotation_matrix44(rotation, detector_rotat.x,detector_rotat.y,detector_rotat.z,0,0,0);
//scale
mat_44 *scale = (mat_44 *)calloc(1,sizeof(mat_44));
scale->m[0][0] =detector_scale.x;
scale->m[1][1] =detector_scale.y;
scale->m[2][2] =1;
//transform
mat_44 *m = (mat_44 *)calloc(1,sizeof(mat_44));
*m = reg_mat_44_mul(rotation,scale);
invViewMatrix[0]=m->m[0][0]; invViewMatrix[1]=m->m[0][1]; invViewMatrix[2] =m->m[0][2];
invViewMatrix[4]=m->m[1][0]; invViewMatrix[5]=m->m[1][1]; invViewMatrix[6] =m->m[1][2];
invViewMatrix[8]=m->m[2][0]; invViewMatrix[9]=m->m[2][1]; invViewMatrix[10]=m->m[2][2];
//translate
invViewMatrix[3] =detector_transl.x;
invViewMatrix[7] =detector_transl.y;
invViewMatrix[11]=detector_transl.z;
//cleanup
free(rotation);
free(scale);
free(m);
return 0;
}
#endif
|
9c4560b1b4a255d5b80dd6535f8c67dc2902fc91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright 2021 Brian Hamilton //
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated //
// documentation files (the "Software"), to deal in the Software without restriction, including without //
// limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of //
// the Software, and to permit persons to whom the Software is furnished to do so, subject to the following //
// conditions: //
// //
// The above copyright notice and this permission notice shall be included in all copies or substantial //
// portions of the Software. //
// //
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT //
// LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO //
// EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN //
// AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE //
// OR OTHER DEALINGS IN THE SOFTWARE. //
// //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //
// FDTD tutorial for 180th ASA meeting - CUDA Kernels to accompany Matlab code //
// //
// Compiles to PTX from Matlab, but can be compiled to PTX with 'nvcc --ptx kernel_2d.cu' //
// //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//air update
__global__ void air_update(double *u0, const double * __restrict__ u1, const double * __restrict__ u2, int Nx, int Ny, bool * in_mask)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
if ((ix>0) && (ix<Nx-1) && (iy>0) && (iy<Ny-1)) {
int ii = iy*Nx+ix;
u0[ii] = (0.5*(u1[ii-1]+u1[ii+1]+u1[ii-Nx]+u1[ii+Nx]) - u2[ii])*in_mask[ii];
}
}
//rigid boundary update
__global__ void rigid_update(double *u0, const double * __restrict__ u1, const double * __restrict__ u2, int Nx, int Nb, int * ib, int * Kib)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
if (ix<Nb) {
int ii = ib[ix]-1; //from matlab indices
u0[ii] = (2-0.5*Kib[ix])*u1[ii] + 0.5*(u1[ii-1]+u1[ii+1]+u1[ii-Nx]+u1[ii+Nx]) - u2[ii];
}
}
//add loss to boundary nodes
__global__ void apply_loss(double *u0, const double * __restrict__ u2, int Nx, int Nb, int * ib, int * Kib, double lf)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
if (ix<Nb) {
int ii = ib[ix]-1; //from matlab indices
u0[ii] = (u0[ii] + lf*(4-Kib[ix])*u2[ii])/(1.0+lf*(4-Kib[ix]));
}
}
| 9c4560b1b4a255d5b80dd6535f8c67dc2902fc91.cu | //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright 2021 Brian Hamilton //
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated //
// documentation files (the "Software"), to deal in the Software without restriction, including without //
// limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of //
// the Software, and to permit persons to whom the Software is furnished to do so, subject to the following //
// conditions: //
// //
// The above copyright notice and this permission notice shall be included in all copies or substantial //
// portions of the Software. //
// //
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT //
// LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO //
// EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN //
// AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE //
// OR OTHER DEALINGS IN THE SOFTWARE. //
// //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //
// FDTD tutorial for 180th ASA meeting - CUDA Kernels to accompany Matlab code //
// //
// Compiles to PTX from Matlab, but can be compiled to PTX with 'nvcc --ptx kernel_2d.cu' //
// //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//air update
__global__ void air_update(double *u0, const double * __restrict__ u1, const double * __restrict__ u2, int Nx, int Ny, bool * in_mask)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
if ((ix>0) && (ix<Nx-1) && (iy>0) && (iy<Ny-1)) {
int ii = iy*Nx+ix;
u0[ii] = (0.5*(u1[ii-1]+u1[ii+1]+u1[ii-Nx]+u1[ii+Nx]) - u2[ii])*in_mask[ii];
}
}
//rigid boundary update
__global__ void rigid_update(double *u0, const double * __restrict__ u1, const double * __restrict__ u2, int Nx, int Nb, int * ib, int * Kib)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
if (ix<Nb) {
int ii = ib[ix]-1; //from matlab indices
u0[ii] = (2-0.5*Kib[ix])*u1[ii] + 0.5*(u1[ii-1]+u1[ii+1]+u1[ii-Nx]+u1[ii+Nx]) - u2[ii];
}
}
//add loss to boundary nodes
__global__ void apply_loss(double *u0, const double * __restrict__ u2, int Nx, int Nb, int * ib, int * Kib, double lf)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
if (ix<Nb) {
int ii = ib[ix]-1; //from matlab indices
u0[ii] = (u0[ii] + lf*(4-Kib[ix])*u2[ii])/(1.0+lf*(4-Kib[ix]));
}
}
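// Illustrative host-side launch sketch (not part of the tutorial, which drives these
// kernels from Matlab via PTX). Block/grid sizes and variable names are assumptions:
//
//   dim3 block(16, 16);
//   dim3 grid((Nx + block.x - 1) / block.x, (Ny + block.y - 1) / block.y);
//   air_update<<<grid, block>>>(u0, u1, u2, Nx, Ny, in_mask);
//   rigid_update<<<(Nb + 255) / 256, 256>>>(u0, u1, u2, Nx, Nb, ib, Kib);
//   apply_loss<<<(Nb + 255) / 256, 256>>>(u0, u2, Nx, Nb, ib, Kib, lf);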
|
8978800f06d9b99deaa66cdd387be651129f07cb.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 4, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 8978800f06d9b99deaa66cdd387be651129f07cb.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 4, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
865fa51f8df27a5fe2ea630746629ae1e94df74e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include "tools/util/host_matrix.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tile_iterator.h"
#include "cutlass/tile_traits_standard.h"
#include "cutlass/zip_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
/// Kernel which can use tile iterators and zip iterators
template <typename LoadIterator, typename StoreIterator>
__global__ void zip_iterator_kernel(
typename LoadIterator::Params load_params,
typename StoreIterator::Params store_params) {
LoadIterator load_iterator(load_params);
StoreIterator store_iterator(store_params);
typename LoadIterator::Fragment fragment;
load_iterator.load_post_increment(fragment);
store_iterator.store_post_increment(fragment);
}
} // namespace test
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Test framework
template <typename Scalar, typename Shape>
struct ZipIteratorTest {
//
// Type definitions
//
static int const kThreadCount = 128;
typedef cutlass::TileTraitsStandard<Shape, kThreadCount> TileTraits;
typedef cutlass::TileLoadIterator<TileTraits, Scalar> ScalarLoadIterator;
typedef cutlass::TileStoreIterator<TileTraits, Scalar> ScalarStoreIterator;
typedef cutlass::ZipTileIterator<ScalarLoadIterator, ScalarLoadIterator> ZipLoadIterator;
typedef cutlass::ZipTileIterator<ScalarStoreIterator, ScalarStoreIterator> ZipStoreIterator;
//
// Data members
//
cutlass::HostMatrix<Scalar> tensor_source_real;
cutlass::HostMatrix<Scalar> tensor_source_imag;
cutlass::HostMatrix<Scalar> tensor_dest_real;
cutlass::HostMatrix<Scalar> tensor_dest_imag;
//
// Methods
//
/// Ctor
ZipIteratorTest() {
tensor_source_real.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
tensor_source_imag.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
tensor_dest_real.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
tensor_dest_imag.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
}
/// Runs test
void run() {
tensor_source_real.fill_sequential();
tensor_source_imag.fill_sequential();
tensor_dest_real.fill(0);
tensor_dest_imag.fill(0);
tensor_source_real.sync_device();
tensor_source_imag.sync_device();
tensor_dest_real.sync_device();
tensor_dest_imag.sync_device();
typename ZipLoadIterator::Params load_params;
typename ZipStoreIterator::Params store_params;
load_params.first.initialize(
tensor_source_real.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
load_params.second.initialize(
tensor_source_imag.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
store_params.first.initialize(
tensor_dest_real.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
store_params.second.initialize(
tensor_dest_imag.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
/// Launch kernel
hipLaunchKernelGGL(( test::zip_iterator_kernel<ZipLoadIterator, ZipStoreIterator>),
dim3(dim3(1,1)),
dim3(dim3(kThreadCount, 1))
, 0, 0,
load_params,
store_params
);
hipError_t result = hipGetLastError();
EXPECT_EQ(result, hipSuccess) << "Error on kernel launch: " << hipGetErrorString(result);
tensor_dest_real.sync_host();
tensor_dest_imag.sync_host();
// Verify equivalence
EXPECT_TRUE(tensor_dest_real.bit_equals(tensor_source_real));
EXPECT_TRUE(tensor_dest_imag.bit_equals(tensor_source_imag));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(ZipTileIterator, tile_128x8) {
ZipIteratorTest<int, cutlass::Shape<1, 8, 128> >().run();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| 865fa51f8df27a5fe2ea630746629ae1e94df74e.cu | /***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include "tools/util/host_matrix.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tile_iterator.h"
#include "cutlass/tile_traits_standard.h"
#include "cutlass/zip_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
/// Kernel which can use tile iterators and zip iterators
template <typename LoadIterator, typename StoreIterator>
__global__ void zip_iterator_kernel(
typename LoadIterator::Params load_params,
typename StoreIterator::Params store_params) {
LoadIterator load_iterator(load_params);
StoreIterator store_iterator(store_params);
typename LoadIterator::Fragment fragment;
load_iterator.load_post_increment(fragment);
store_iterator.store_post_increment(fragment);
}
} // namespace test
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Test framework
template <typename Scalar, typename Shape>
struct ZipIteratorTest {
//
// Type definitions
//
static int const kThreadCount = 128;
typedef cutlass::TileTraitsStandard<Shape, kThreadCount> TileTraits;
typedef cutlass::TileLoadIterator<TileTraits, Scalar> ScalarLoadIterator;
typedef cutlass::TileStoreIterator<TileTraits, Scalar> ScalarStoreIterator;
typedef cutlass::ZipTileIterator<ScalarLoadIterator, ScalarLoadIterator> ZipLoadIterator;
typedef cutlass::ZipTileIterator<ScalarStoreIterator, ScalarStoreIterator> ZipStoreIterator;
//
// Data members
//
cutlass::HostMatrix<Scalar> tensor_source_real;
cutlass::HostMatrix<Scalar> tensor_source_imag;
cutlass::HostMatrix<Scalar> tensor_dest_real;
cutlass::HostMatrix<Scalar> tensor_dest_imag;
//
// Methods
//
/// Ctor
ZipIteratorTest() {
tensor_source_real.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
tensor_source_imag.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
tensor_dest_real.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
tensor_dest_imag.resize(cutlass::make_Coord(Shape::kH, Shape::kW), cutlass::MatrixLayout::kRowMajor);
}
/// Runs test
void run() {
tensor_source_real.fill_sequential();
tensor_source_imag.fill_sequential();
tensor_dest_real.fill(0);
tensor_dest_imag.fill(0);
tensor_source_real.sync_device();
tensor_source_imag.sync_device();
tensor_dest_real.sync_device();
tensor_dest_imag.sync_device();
typename ZipLoadIterator::Params load_params;
typename ZipStoreIterator::Params store_params;
load_params.first.initialize(
tensor_source_real.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
load_params.second.initialize(
tensor_source_imag.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
store_params.first.initialize(
tensor_dest_real.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
store_params.second.initialize(
tensor_dest_imag.device_data(),
0,
tensor_source_real.leading_dim(),
1
);
/// Launch kernel
test::zip_iterator_kernel<ZipLoadIterator, ZipStoreIterator><<<
dim3(1,1),
dim3(kThreadCount, 1)
>>>(
load_params,
store_params
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error on kernel launch: " << cudaGetErrorString(result);
tensor_dest_real.sync_host();
tensor_dest_imag.sync_host();
// Verify equivalence
EXPECT_TRUE(tensor_dest_real.bit_equals(tensor_source_real));
EXPECT_TRUE(tensor_dest_imag.bit_equals(tensor_source_imag));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(ZipTileIterator, tile_128x8) {
ZipIteratorTest<int, cutlass::Shape<1, 8, 128> >().run();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
|
c5a7f23e96f132fc629bc6d6e7506e859cba02cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudacommon.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>
#include <vector>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Scan.h"
#include "scan_kernel.h"
using namespace std;
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op)
{
op.addOption("iterations", OPT_INT, "256", "specify scan iterations");
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the scan (parallel prefix sum) benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
// 5/18/2011 - KS - Changing to a non-recursive algorithm
// ****************************************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
int device;
hipGetDevice(&device);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
cout << "Running single precision test" << endl;
RunTest<float, float4>("Scan", resultDB, op);
// Test to see if this device supports double precision
if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||
(deviceProp.major >= 2))
{
cout << "Running double precision test" << endl;
RunTest<double, double4>("Scan-DP", resultDB, op);
} else {
cout << "Skipping double precision test" << endl;
char atts[1024] = "DP_Not_Supported";
// resultDB requires neg entry for every possible result
int passes = op.getOptionInt("passes");
for (int k = 0; k < passes; k++) {
resultDB.AddResult("Scan-DP" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("Scan-DP_PCIe" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("Scan-DP_Parity" , atts, "GB/s", FLT_MAX);
}
}
}
template <class T, class vecT>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
int probSizes[4] = { 1, 8, 32, 64 };
int size = probSizes[op.getOptionInt("size")-1];
// Convert to MiB
size = (size * 1024 * 1024) / sizeof(T);
// create input data on CPU
unsigned int bytes = size * sizeof(T);
// Allocate Host Memory
T* h_idata;
T* reference;
T* h_odata;
CUDA_SAFE_CALL(hipHostMalloc((void**) &h_idata, bytes));
CUDA_SAFE_CALL(hipHostMalloc((void**) &reference, bytes));
CUDA_SAFE_CALL(hipHostMalloc((void**) &h_odata, bytes));
// Initialize host memory
cout << "Initializing host memory." << endl;
for (int i = 0; i < size; i++)
{
h_idata[i] = i % 3; // Fill with some pattern
h_odata[i] = i % 3;
}
// Thread configuration
// Note: changing this may require updating the kernel calls below
int num_blocks = 64;
int num_threads = 256;
int smem_size = sizeof(T) * num_threads;
// Allocate device memory
T* d_idata, *d_odata, *d_block_sums;
CUDA_SAFE_CALL(hipMalloc((void**) &d_idata, bytes));
CUDA_SAFE_CALL(hipMalloc((void**) &d_odata, bytes));
CUDA_SAFE_CALL(hipMalloc((void**) &d_block_sums, num_blocks * sizeof(T)));
// Copy data to GPU
cout << "Copying data to device." << endl;
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
CUDA_SAFE_CALL(hipEventRecord(start, 0));
CUDA_SAFE_CALL(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipEventRecord(stop, 0);
CUDA_SAFE_CALL(hipEventSynchronize(stop));
// Get elapsed time
float transferTime = 0.0f;
hipEventElapsedTime(&transferTime, start, stop);
transferTime *= 1.e-3;
int passes = op.getOptionInt("passes");
int iters = op.getOptionInt("iterations");
cout << "Running benchmark with size " << size << endl;
for (int k = 0; k < passes; k++)
{
float totalScanTime = 0.0f;
CUDA_SAFE_CALL(hipEventRecord(start, 0));
for (int j = 0; j < iters; j++)
{
// For scan, we use a reduce-then-scan approach
// Each thread block gets an equal portion of the
// input array, and computes the sum.
hipLaunchKernelGGL(( reduce<T,256>), dim3(num_blocks), dim3(num_threads), smem_size, 0,
d_idata, d_block_sums, size);
// Next, a top-level exclusive scan is performed on the array
// of block sums
hipLaunchKernelGGL(( scan_single_block<T,256>), dim3(1), dim3(num_threads), smem_size*2, 0,
d_block_sums, num_blocks);
// Finally, a bottom-level scan is performed by each block
// that is seeded with the scanned value in block sums
hipLaunchKernelGGL(( bottom_scan<T, vecT>), dim3(num_blocks), dim3(num_threads), 2*smem_size, 0,
d_idata, d_odata, d_block_sums, size);
}
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
hipEventElapsedTime(&totalScanTime, start, stop);
float oTransferTime = 0.0f;
CUDA_SAFE_CALL(hipEventRecord(start, 0));
CUDA_SAFE_CALL(hipMemcpy(h_odata, d_odata, bytes,
hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
hipEventElapsedTime(&oTransferTime, start, stop);
// Only add output transfer time once
if (k == 0)
{
transferTime += oTransferTime;
}
// If results aren't correct, don't report perf numbers
if (! scanCPU<T>(h_idata, reference, h_odata, size))
{
return;
}
char atts[1024];
double avgTime = (totalScanTime / (double) iters);
avgTime *= 1.e-3;
sprintf(atts, "%ditems", size);
double gb = (double)(size * sizeof(T)) / (1000. * 1000. * 1000.);
resultDB.AddResult(testName, atts, "GB/s", gb / avgTime);
resultDB.AddResult(testName+"_PCIe", atts, "GB/s",
gb / (avgTime + transferTime));
resultDB.AddResult(testName+"_Parity", atts, "N",
transferTime / avgTime);
}
CUDA_SAFE_CALL(hipFree(d_idata));
CUDA_SAFE_CALL(hipFree(d_odata));
CUDA_SAFE_CALL(hipFree(d_block_sums));
CUDA_SAFE_CALL(hipHostFree(h_idata));
CUDA_SAFE_CALL(hipHostFree(h_odata));
CUDA_SAFE_CALL(hipHostFree(reference));
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(stop));
}
// ****************************************************************************
// Function: scanCPU
//
// Purpose:
// Simple cpu scan routine to verify device results
//
// Arguments:
// data : the input data
// reference : space for the cpu solution
// dev_result : result from the device
// size : number of elements
//
// Returns: nothing, prints relevant info to stdout
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
template <class T>
bool scanCPU(T *data, T* reference, T* dev_result, const size_t size)
{
bool passed = true;
T last = 0.0f;
for (unsigned int i = 0; i < size; ++i)
{
reference[i] = data[i] + last;
last = reference[i];
}
for (unsigned int i = 0; i < size; ++i)
{
if (reference[i] != dev_result[i])
{
#ifdef VERBOSE_OUTPUT
cout << "Mismatch at i: " << i << " ref: " << reference[i]
<< " dev: " << dev_result[i] << endl;
#endif
passed = false;
}
}
cout << "Test ";
if (passed)
cout << "Passed" << endl;
else
cout << "---FAILED---" << endl;
return passed;
}
| c5a7f23e96f132fc629bc6d6e7506e859cba02cc.cu | #include "cudacommon.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>
#include <vector>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Scan.h"
#include "scan_kernel.h"
using namespace std;
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op)
{
op.addOption("iterations", OPT_INT, "256", "specify scan iterations");
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the scan (parallel prefix sum) benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
// 5/18/2011 - KS - Changing to a non-recursive algorithm
// ****************************************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
int device;
cudaGetDevice(&device);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
cout << "Running single precision test" << endl;
RunTest<float, float4>("Scan", resultDB, op);
// Test to see if this device supports double precision
if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||
(deviceProp.major >= 2))
{
cout << "Running double precision test" << endl;
RunTest<double, double4>("Scan-DP", resultDB, op);
} else {
cout << "Skipping double precision test" << endl;
char atts[1024] = "DP_Not_Supported";
// resultDB requires neg entry for every possible result
int passes = op.getOptionInt("passes");
for (int k = 0; k < passes; k++) {
resultDB.AddResult("Scan-DP" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("Scan-DP_PCIe" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("Scan-DP_Parity" , atts, "GB/s", FLT_MAX);
}
}
}
template <class T, class vecT>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
int probSizes[4] = { 1, 8, 32, 64 };
int size = probSizes[op.getOptionInt("size")-1];
// Convert to MiB
size = (size * 1024 * 1024) / sizeof(T);
// create input data on CPU
unsigned int bytes = size * sizeof(T);
// Allocate Host Memory
T* h_idata;
T* reference;
T* h_odata;
CUDA_SAFE_CALL(cudaMallocHost((void**) &h_idata, bytes));
CUDA_SAFE_CALL(cudaMallocHost((void**) &reference, bytes));
CUDA_SAFE_CALL(cudaMallocHost((void**) &h_odata, bytes));
// Initialize host memory
cout << "Initializing host memory." << endl;
for (int i = 0; i < size; i++)
{
h_idata[i] = i % 3; // Fill with some pattern
h_odata[i] = i % 3;
}
// Thread configuration
// Note: changing this may require updating the kernel calls below
int num_blocks = 64;
int num_threads = 256;
int smem_size = sizeof(T) * num_threads;
// Allocate device memory
T* d_idata, *d_odata, *d_block_sums;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_idata, bytes));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_odata, bytes));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_block_sums, num_blocks * sizeof(T)));
// Copy data to GPU
cout << "Copying data to device." << endl;
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
CUDA_SAFE_CALL(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaEventRecord(stop, 0);
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
// Get elapsed time
float transferTime = 0.0f;
cudaEventElapsedTime(&transferTime, start, stop);
transferTime *= 1.e-3;
int passes = op.getOptionInt("passes");
int iters = op.getOptionInt("iterations");
cout << "Running benchmark with size " << size << endl;
for (int k = 0; k < passes; k++)
{
float totalScanTime = 0.0f;
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
for (int j = 0; j < iters; j++)
{
// For scan, we use a reduce-then-scan approach
// Each thread block gets an equal portion of the
// input array, and computes the sum.
reduce<T,256><<<num_blocks, num_threads, smem_size>>>
(d_idata, d_block_sums, size);
// Next, a top-level exclusive scan is performed on the array
// of block sums
scan_single_block<T,256><<<1, num_threads, smem_size*2>>>
(d_block_sums, num_blocks);
// Finally, a bottom-level scan is performed by each block
// that is seeded with the scanned value in block sums
bottom_scan<T, vecT><<<num_blocks, num_threads, 2*smem_size>>>
(d_idata, d_odata, d_block_sums, size);
}
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
cudaEventElapsedTime(&totalScanTime, start, stop);
float oTransferTime = 0.0f;
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
CUDA_SAFE_CALL(cudaMemcpy(h_odata, d_odata, bytes,
cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
cudaEventElapsedTime(&oTransferTime, start, stop);
// Only add output transfer time once
if (k == 0)
{
transferTime += oTransferTime;
}
// If results aren't correct, don't report perf numbers
if (! scanCPU<T>(h_idata, reference, h_odata, size))
{
return;
}
char atts[1024];
double avgTime = (totalScanTime / (double) iters);
avgTime *= 1.e-3;
sprintf(atts, "%ditems", size);
double gb = (double)(size * sizeof(T)) / (1000. * 1000. * 1000.);
resultDB.AddResult(testName, atts, "GB/s", gb / avgTime);
resultDB.AddResult(testName+"_PCIe", atts, "GB/s",
gb / (avgTime + transferTime));
resultDB.AddResult(testName+"_Parity", atts, "N",
transferTime / avgTime);
}
CUDA_SAFE_CALL(cudaFree(d_idata));
CUDA_SAFE_CALL(cudaFree(d_odata));
CUDA_SAFE_CALL(cudaFree(d_block_sums));
CUDA_SAFE_CALL(cudaFreeHost(h_idata));
CUDA_SAFE_CALL(cudaFreeHost(h_odata));
CUDA_SAFE_CALL(cudaFreeHost(reference));
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
}
// ****************************************************************************
// Function: scanCPU
//
// Purpose:
// Simple cpu scan routine to verify device results
//
// Arguments:
// data : the input data
// reference : space for the cpu solution
// dev_result : result from the device
// size : number of elements
//
// Returns: nothing, prints relevant info to stdout
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
template <class T>
bool scanCPU(T *data, T* reference, T* dev_result, const size_t size)
{
bool passed = true;
T last = 0.0f;
for (unsigned int i = 0; i < size; ++i)
{
reference[i] = data[i] + last;
last = reference[i];
}
for (unsigned int i = 0; i < size; ++i)
{
if (reference[i] != dev_result[i])
{
#ifdef VERBOSE_OUTPUT
cout << "Mismatch at i: " << i << " ref: " << reference[i]
<< " dev: " << dev_result[i] << endl;
#endif
passed = false;
}
}
cout << "Test ";
if (passed)
cout << "Passed" << endl;
else
cout << "---FAILED---" << endl;
return passed;
}
|
ed6f268ead40a945c595f343e319e46f1674f170.hip | // !!! This is a file automatically generated by hipify!!!
// Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p3.cu -o assignment5-p3
// Execute: ./assignment5-p3
#include <cmath>
#include <iostream>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define SIZE 4096
#define THRESHOLD (0.000001)
#define BLOCK_SIZE 16
using std::cerr;
using std::cout;
using std::endl;
double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
__host__ void ATAonCPU(double* M, double* P) {
for (int k = 0; k < SIZE; k++) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++)
P[i*SIZE + j] += M[k*SIZE + i] * M[k*SIZE + j];
}
}
}
__host__ void check_result(double* Test, double* Ref) {
double maxdiff = 0, rel_diff = 0;
int numdiffs = 0;
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
rel_diff = (Test[i*SIZE + j] - Ref[i*SIZE + j]);
if (fabs(rel_diff) > THRESHOLD) {
numdiffs++;
if (rel_diff > maxdiff)
maxdiff = rel_diff;
}
}
}
if (numdiffs > 0)
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << " Max Diff = " << maxdiff
<< "\n";
else
cout << "No differences found between base and test versions\n";
}
__host__ void reset(double* h_dev_out){
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
h_dev_out[i*SIZE + j] = 0;
}
}
}
__global__ void ATAkernel1(double* A, double* B) {
    // Naive version: each thread computes one output element B[i][j] = sum_k A[k][i]*A[k][j]
    // straight from global memory.
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
double val = 0;
for(int k = 0; k < SIZE; k++){
val += A[k*SIZE + i] * A[k*SIZE + j];
}
B[i*SIZE + j] = val;
}
__global__ void ATAkernel2(double* A, double* B) {
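    // Tiled version: block (blockIdx.y, blockIdx.x) computes one BLOCK_SIZE x BLOCK_SIZE
    // tile of B = A^T * A. Each loop iteration stages a BLOCK_SIZE-row slab of the two
    // relevant column blocks of A in shared memory, then every thread accumulates its
    // partial dot product from the staged tiles.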
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int top_left_i = blockIdx.y * BLOCK_SIZE;
int top_left_j = blockIdx.x * BLOCK_SIZE;
double val = 0;
for(int block_num = 0; block_num < SIZE/BLOCK_SIZE; block_num++){
__shared__ double mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double mat2[BLOCK_SIZE][BLOCK_SIZE];
mat1[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_i + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_j + threadIdx.x)];
__syncthreads();
for(int k = 0; k < BLOCK_SIZE; k++){
val += mat1[k][threadIdx.y] * mat2[k][threadIdx.x];
}
__syncthreads();
}
B[i*SIZE + j] = val;
}
__global__ void ATAkernel3(double* A, double* B) {
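    // Same shared-memory tiling as ATAkernel2, with the inner reduction loop manually
    // unrolled by a factor of 4 (assumes BLOCK_SIZE is a multiple of 4, as it is here).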
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int top_left_i = blockIdx.y * BLOCK_SIZE;
int top_left_j = blockIdx.x * BLOCK_SIZE;
double val = 0;
for(int block_num = 0; block_num < SIZE/BLOCK_SIZE; block_num++){
__shared__ double mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double mat2[BLOCK_SIZE][BLOCK_SIZE];
mat1[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_i + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_j + threadIdx.x)];
__syncthreads();
for(int k = 0; k < BLOCK_SIZE; k += 4){
val += mat1[k][threadIdx.y] * mat2[k][threadIdx.x];
val += mat1[k+1][threadIdx.y] * mat2[k+1][threadIdx.x];
val += mat1[k+2][threadIdx.y] * mat2[k+2][threadIdx.x];
val += mat1[k+3][threadIdx.y] * mat2[k+3][threadIdx.x];
}
__syncthreads();
}
B[i*SIZE + j] = val;
}
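// Host driver: each GPU version is timed with events that bracket the
// host-to-device copy, the kernel launch, and the device-to-host copy, so the
// reported times include transfer cost. Note that the printed "GFLOPS" values
// are not divided by 1e9, so they are really FLOP/s. Every version is checked
// against the CPU reference with check_result().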
int main() {
cout << "Matrix Size = " << SIZE << "\n";
double* h_in = new double[SIZE*SIZE];
double* h_cpu_out = new double[SIZE*SIZE];
double* h_dev_out = new double[SIZE*SIZE];
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
h_in[i*SIZE + j] = i * j * 0.25;
h_cpu_out[i*SIZE + j] = 0;
h_dev_out[i*SIZE + j] = 0;
}
}
double clkbegin = rtclock();
ATAonCPU(h_in, h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "A^T.A on CPU: " << ((2.0 * SIZE * SIZE * SIZE) / cpu_time)
<< " GFLOPS; Time = " << cpu_time * 1000 << " msec" << endl;
hipError_t status;
hipEvent_t start, end;
float kernel_time;
double* d_in;
double* d_out;
// TODO: Fill in
// first kernel
size_t size = SIZE * SIZE * sizeof(double);
dim3 threadsPerBlock(32,32);
dim3 numBlocks(SIZE/threadsPerBlock.x, SIZE/threadsPerBlock.y);
status = hipMalloc(&d_in, size);
if (status != hipSuccess) {
fprintf(stderr, "hipMalloc() failed");
return EXIT_FAILURE;
}
status = hipMalloc(&d_out, size);
if (status != hipSuccess) {
fprintf(stderr, "hipMalloc() failed");
return EXIT_FAILURE;
}
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice);
if (status != hipSuccess) {
fprintf(stderr, "hipMemcpy() failed");
return EXIT_FAILURE;
}
hipLaunchKernelGGL(( ATAkernel1), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_in, d_out);
status = hipMemcpy(h_dev_out, d_out, size, hipMemcpyDeviceToHost);
if (status != hipSuccess) {
fprintf(stderr, "hipMemcpy() failed");
return EXIT_FAILURE;
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&kernel_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
check_result(h_cpu_out, h_dev_out);
cout << "A^T.A version1 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
// second kernel
reset(h_dev_out);
threadsPerBlock = dim3(BLOCK_SIZE,BLOCK_SIZE);
numBlocks = dim3(SIZE/threadsPerBlock.x, SIZE/threadsPerBlock.y);
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice);
if (status != hipSuccess) {
fprintf(stderr, "hipMemcpy() failed");
return EXIT_FAILURE;
}
hipLaunchKernelGGL(( ATAkernel2), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_in, d_out);
status = hipMemcpy(h_dev_out, d_out, size, hipMemcpyDeviceToHost);
if (status != hipSuccess) {
fprintf(stderr, "hipMemcpy() failed");
return EXIT_FAILURE;
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&kernel_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
check_result(h_cpu_out, h_dev_out);
cout << "A^T.A version2 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
// third kernel
reset(h_dev_out);
threadsPerBlock = dim3(BLOCK_SIZE,BLOCK_SIZE);
numBlocks = dim3(SIZE/threadsPerBlock.x, SIZE/threadsPerBlock.y);
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice);
if (status != hipSuccess) {
fprintf(stderr, "hipMemcpy() failed");
return EXIT_FAILURE;
}
hipLaunchKernelGGL(( ATAkernel3), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_in, d_out);
status = hipMemcpy(h_dev_out, d_out, size, hipMemcpyDeviceToHost);
if (status != hipSuccess) {
fprintf(stderr, "hipMemcpy() failed");
return EXIT_FAILURE;
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&kernel_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
check_result(h_cpu_out, h_dev_out);
cout << "A^T.A version3 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
hipFree(d_in);
hipFree(d_out);
delete[] h_in;
delete[] h_cpu_out;
delete[] h_dev_out;
return EXIT_SUCCESS;
}
| ed6f268ead40a945c595f343e319e46f1674f170.cu | // Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p3.cu -o assignment5-p3
// Execute: ./assignment5-p3
#include <cmath>
#include <iostream>
#include <sys/time.h>
#include <cuda.h>
#define SIZE 4096
#define THRESHOLD (0.000001)
#define BLOCK_SIZE 16
using std::cerr;
using std::cout;
using std::endl;
double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
__host__ void ATAonCPU(double* M, double* P) {
for (int k = 0; k < SIZE; k++) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++)
P[i*SIZE + j] += M[k*SIZE + i] * M[k*SIZE + j];
}
}
}
__host__ void check_result(double* Test, double* Ref) {
double maxdiff = 0, rel_diff = 0;
int numdiffs = 0;
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
rel_diff = (Test[i*SIZE + j] - Ref[i*SIZE + j]);
if (fabs(rel_diff) > THRESHOLD) {
numdiffs++;
if (rel_diff > maxdiff)
maxdiff = rel_diff;
}
}
}
if (numdiffs > 0)
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << " Max Diff = " << maxdiff
<< "\n";
else
cout << "No differences found between base and test versions\n";
}
__host__ void reset(double* h_dev_out){
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
h_dev_out[i*SIZE + j] = 0;
}
}
}
__global__ void ATAkernel1(double* A, double* B) {
// TODO: Fill in
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
double val = 0;
for(int k = 0; k < SIZE; k++){
val += A[k*SIZE + i] * A[k*SIZE + j];
}
B[i*SIZE + j] = val;
}
__global__ void ATAkernel2(double* A, double* B) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int top_left_i = blockIdx.y * BLOCK_SIZE;
int top_left_j = blockIdx.x * BLOCK_SIZE;
double val = 0;
for(int block_num = 0; block_num < SIZE/BLOCK_SIZE; block_num++){
__shared__ double mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double mat2[BLOCK_SIZE][BLOCK_SIZE];
mat1[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_i + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_j + threadIdx.x)];
__syncthreads();
for(int k = 0; k < BLOCK_SIZE; k++){
val += mat1[k][threadIdx.y] * mat2[k][threadIdx.x];
}
__syncthreads();
}
B[i*SIZE + j] = val;
}
__global__ void ATAkernel3(double* A, double* B) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int top_left_i = blockIdx.y * BLOCK_SIZE;
int top_left_j = blockIdx.x * BLOCK_SIZE;
double val = 0;
for(int block_num = 0; block_num < SIZE/BLOCK_SIZE; block_num++){
__shared__ double mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double mat2[BLOCK_SIZE][BLOCK_SIZE];
mat1[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_i + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_j + threadIdx.x)];
__syncthreads();
for(int k = 0; k < BLOCK_SIZE; k += 4){
val += mat1[k][threadIdx.y] * mat2[k][threadIdx.x];
val += mat1[k+1][threadIdx.y] * mat2[k+1][threadIdx.x];
val += mat1[k+2][threadIdx.y] * mat2[k+2][threadIdx.x];
val += mat1[k+3][threadIdx.y] * mat2[k+3][threadIdx.x];
}
__syncthreads();
}
B[i*SIZE + j] = val;
}
int main() {
cout << "Matrix Size = " << SIZE << "\n";
double* h_in = new double[SIZE*SIZE];
double* h_cpu_out = new double[SIZE*SIZE];
double* h_dev_out = new double[SIZE*SIZE];
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
h_in[i*SIZE + j] = i * j * 0.25;
h_cpu_out[i*SIZE + j] = 0;
h_dev_out[i*SIZE + j] = 0;
}
}
double clkbegin = rtclock();
ATAonCPU(h_in, h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "A^T.A on CPU: " << ((2.0 * SIZE * SIZE * SIZE) / cpu_time)
<< " GFLOPS; Time = " << cpu_time * 1000 << " msec" << endl;
cudaError_t status;
cudaEvent_t start, end;
float kernel_time;
double* d_in;
double* d_out;
// TODO: Fill in
// first kernel
size_t size = SIZE * SIZE * sizeof(double);
dim3 threadsPerBlock(32,32);
dim3 numBlocks(SIZE/threadsPerBlock.x, SIZE/threadsPerBlock.y);
status = cudaMalloc(&d_in, size);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
status = cudaMalloc(&d_out, size);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
ATAkernel1<<<numBlocks, threadsPerBlock>>>(d_in, d_out);
status = cudaMemcpy(h_dev_out, d_out, size, cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out, h_dev_out);
cout << "A^T.A version1 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
// second kernel
reset(h_dev_out);
threadsPerBlock = dim3(BLOCK_SIZE,BLOCK_SIZE);
numBlocks = dim3(SIZE/threadsPerBlock.x, SIZE/threadsPerBlock.y);
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
ATAkernel2<<<numBlocks, threadsPerBlock>>>(d_in, d_out);
status = cudaMemcpy(h_dev_out, d_out, size, cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out, h_dev_out);
cout << "A^T.A version2 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
// third kernel
reset(h_dev_out);
threadsPerBlock = dim3(BLOCK_SIZE,BLOCK_SIZE);
numBlocks = dim3(SIZE/threadsPerBlock.x, SIZE/threadsPerBlock.y);
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
ATAkernel3<<<numBlocks, threadsPerBlock>>>(d_in, d_out);
status = cudaMemcpy(h_dev_out, d_out, size, cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out, h_dev_out);
cout << "A^T.A version3 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
cudaFree(d_in);
cudaFree(d_out);
delete[] h_in;
delete[] h_cpu_out;
delete[] h_dev_out;
return EXIT_SUCCESS;
}
|
fb4a061e75534e80bf1552ef3d2ec19b1414acbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <cmath>
#include <fstream>
#include <sstream>
using namespace std;
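// Managed: base class that overrides operator new/delete so every derived
// object (Neuron, Layer below) is allocated with hipMallocManaged, i.e. in
// unified (managed) memory that the same pointer can reach from both host and
// device code. The hipDeviceSynchronize calls make sure no kernel is still
// using the memory when it is handed out or freed.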
class Managed {
public:
void *operator new(size_t len) {
void *ptr;
hipMallocManaged(&ptr, len);
hipDeviceSynchronize();
return ptr;
}
void operator delete(void *ptr) {
hipDeviceSynchronize();
hipFree(ptr);
}
};
class Neuron;
class Neuron : public Managed {
public:
// This is analogous to output_val
unsigned my_cats;
// Assumed constructor (not in the original class): main() below calls
// `new Neuron(2)`, which needs a constructor taking the initial my_cats value.
Neuron(unsigned cats) : my_cats(cats) {}
// This is analogous to setOutputVals
__device__ __host__ void eh(unsigned new_num); //You have to put tags here as well, or CUDA won't compile it
};
//This gets highlighted weirdly for me, but it works - you can call it from the CPU or GPU
__host__
__device__
void Neuron::eh(unsigned new_num) {
my_cats = new_num;
}
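// Layer itself is Managed, but std::vector allocates its backing store with
// the normal host allocator, so the neurons vector is only walked on the host
// (see print_cats); kernels receive Neuron references directly instead.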
class Layer : public Managed {
public:
//Vector of pointers to Neuron objects
vector<Neuron *> neurons;
};
// This is analogous to feedforward
// It just takes my_cats from one neuron and sticks it in another
__global__
void change_cat(Neuron &n_original, Neuron &n_to_copy) {
n_original.my_cats = n_to_copy.my_cats;
tanh(2.0);
}
//Playing with Eh as a device function; it has to be called from a global. That's what this wrapper is for.
__global__
void eh_wrapper(Neuron &n, unsigned new_num) {
n.eh(new_num);
}
//Print out the my_cats value for every neuron in the layer
void print_cats(Layer &layer) {
for (int i = 0; i<layer.neurons.size(); i++) {
printf("Neuron %i has %i\n", i+1, (*(layer.neurons[i])).my_cats);
}
printf("\n");
}
int main(void) {
Neuron *n1 = new Neuron(2); //New returns a pointer to the new object
// (*n1).my_cats = 2;
Neuron *n2 = new Neuron(7);
// (*n2).my_cats = 7;
Layer *layer = new Layer;
(*layer).neurons.push_back(n1); //Should we store a vector of pointers or a vector of neurons?
(*layer).neurons.push_back(n2); //Answer: Vector of neurons didn't work, vector of pointers did
print_cats(*layer);
hipLaunchKernelGGL(( change_cat) , dim3(1), dim3(2), 0, 0, (*n1), (*n2));
hipDeviceSynchronize(); //If you ever get a Bus Error, you probably forgot this line
print_cats(*layer);
hipLaunchKernelGGL(( eh_wrapper) , dim3(1), dim3(1), 0, 0, (*n1), 3);
hipDeviceSynchronize();
print_cats(*layer);
delete n1; delete n2; delete layer;
return 0;
} | fb4a061e75534e80bf1552ef3d2ec19b1414acbe.cu | #include <vector>
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <cmath>
#include <fstream>
#include <sstream>
using namespace std;
class Managed {
public:
void *operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
cudaDeviceSynchronize();
return ptr;
}
void operator delete(void *ptr) {
cudaDeviceSynchronize();
cudaFree(ptr);
}
};
class Neuron;
class Neuron : public Managed {
public:
// This is analogous to output_val
unsigned my_cats;
// Assumed constructor (not in the original class): main() below calls
// `new Neuron(2)`, which needs a constructor taking the initial my_cats value.
Neuron(unsigned cats) : my_cats(cats) {}
// This is analogous to setOutputVals
__device__ __host__ void eh(unsigned new_num); //You have to put tags here as well, or CUDA won't compile it
};
//This gets highlighted weirdly for me, but it works - you can call it from the CPU or GPU
__host__
__device__
void Neuron::eh(unsigned new_num) {
my_cats = new_num;
}
class Layer : public Managed {
public:
//Vector of pointers to Neuron objects
vector<Neuron *> neurons;
};
// This is analogous to feedforward
// It just takes my_cats from one neuron and sticks it in another
__global__
void change_cat(Neuron &n_original, Neuron &n_to_copy) {
n_original.my_cats = n_to_copy.my_cats;
tanh(2.0);
}
//Playing with Eh as a device function; it has to be called from a global. That's what this wrapper is for.
__global__
void eh_wrapper(Neuron &n, unsigned new_num) {
n.eh(new_num);
}
//Print out the my_cats value for every neuron in the layer
void print_cats(Layer &layer) {
for (int i = 0; i<layer.neurons.size(); i++) {
printf("Neuron %i has %i\n", i+1, (*(layer.neurons[i])).my_cats);
}
printf("\n");
}
int main(void) {
Neuron *n1 = new Neuron(2); //New returns a pointer to the new object
// (*n1).my_cats = 2;
Neuron *n2 = new Neuron(7);
// (*n2).my_cats = 7;
Layer *layer = new Layer;
(*layer).neurons.push_back(n1); //Should we store a vector of pointers or a vector of neurons?
(*layer).neurons.push_back(n2); //Answer: Vector of neurons didn't work, vector of pointers did
print_cats(*layer);
change_cat <<<1, 2>>> ((*n1), (*n2));
cudaDeviceSynchronize(); //If you ever get a Bus Error, you probably forgot this line
print_cats(*layer);
eh_wrapper <<<1, 1>>> ((*n1), 3);
cudaDeviceSynchronize();
print_cats(*layer);
delete n1; delete n2; delete layer;
return 0;
} |
e53aa3bcc1254577345e283005b19290f43f7d56.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA Bitmap Selectron prototype
//
// Patrick Walton <[email protected]>
//
// Copyright (c) 2015 Mozilla Corporation
#include "bitmap-selectron.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#define THREAD_COUNT 256
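// FNV-1a style 32-bit hash (offset basis 2166136261, prime 16777619) that
// folds in a seed followed by the key; used to map element ids onto
// stylesheet buckets.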
__device__ unsigned hash_device(unsigned key, unsigned seed) {
unsigned int hash = 2166136261;
hash = hash ^ seed;
hash = hash * 16777619;
hash = hash ^ key;
hash = hash * 16777619;
return hash;
}
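// One thread per DOM node: hash the node's id with a fixed seed, reduce it
// modulo MAX_ID, and copy the corresponding stylesheet entry into
// matched_rules[node].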
__global__ void match_selectors_device(const int *ids,
const uint16_t *stylesheet,
uint16_t *matched_rules) {
int node = blockIdx.x * THREAD_COUNT + threadIdx.x;
matched_rules[node] = stylesheet[hash_device(ids[node], 12345) % MAX_ID];
}
int get_cuda_device(bool cpu) {
int device_count;
checkCudaErrors(hipGetDeviceCount(&device_count));
for (int device = 0; device < device_count; device++) {
hipDeviceProp_t device_props;
hipGetDeviceProperties(&device_props, device);
if (device_props.computeMode == hipComputeModeProhibited)
continue;
fprintf(stderr, "found device: %s\n", device_props.name);
return device;
}
fprintf(stderr, "no device found\n");
return 1;
}
int main(int argc, char **argv) {
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
int device_id = get_cuda_device(false);
checkCudaErrors(hipSetDevice(device_id));
srand(time(NULL));
// Create the stylesheet on the host.
uint16_t *host_stylesheet = (uint16_t *)malloc(MAX_ID);
for (int i = 0; i < MAX_ID; i++)
host_stylesheet[i] = rand();
// Create the DOM on the host.
int *host_ids = (int *)malloc(sizeof(int) * NODE_COUNT);
create_dom(host_ids);
// Allocate the IDs and copy over.
int *device_ids;
checkCudaErrors(hipMalloc((void **)&device_ids, sizeof(int) * NODE_COUNT));
checkCudaErrors(hipMemcpy(device_ids,
host_ids,
sizeof(int) * NODE_COUNT,
hipMemcpyHostToDevice));
// Allocate the stylesheet and copy over.
uint16_t *device_stylesheet;
checkCudaErrors(hipMalloc((void **)&device_stylesheet, sizeof(uint16_t) * MAX_ID));
checkCudaErrors(hipMemcpy(device_stylesheet,
host_stylesheet,
sizeof(uint16_t) * MAX_ID,
hipMemcpyHostToDevice));
// Allocate the matched rules.
uint16_t *device_matched_rules;
uint16_t *device_matched_rules_host_mirror = (uint16_t *)malloc(sizeof(uint16_t) * NODE_COUNT);
checkCudaErrors(hipMalloc((uint16_t **)&device_matched_rules, sizeof(uint16_t) * NODE_COUNT));
// Create start/stop events.
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Execute the kernel on the GPU.
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( match_selectors_device), dim3(NODE_COUNT / THREAD_COUNT), dim3(THREAD_COUNT), 0, 0, device_ids,
device_stylesheet,
device_matched_rules);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
float gpu_elapsed = 0.0f;
checkCudaErrors(hipEventElapsedTime(&gpu_elapsed, start, stop));
fprintf(stderr, "Elapsed time: %f ms\n", gpu_elapsed);
for (int i = 0; i <
checkCudaErrors(hipFree(device_matched_rules));
checkCudaErrors(hipFree(device_stylesheet));
checkCudaErrors(hipFree(device_ids));
hipDeviceReset();
return 0;
}
| e53aa3bcc1254577345e283005b19290f43f7d56.cu | // CUDA Bitmap Selectron prototype
//
// Patrick Walton <[email protected]>
//
// Copyright (c) 2015 Mozilla Corporation
#include "bitmap-selectron.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#define THREAD_COUNT 256
__device__ unsigned hash_device(unsigned key, unsigned seed) {
unsigned int hash = 2166136261;
hash = hash ^ seed;
hash = hash * 16777619;
hash = hash ^ key;
hash = hash * 16777619;
return hash;
}
__global__ void match_selectors_device(const int *ids,
const uint16_t *stylesheet,
uint16_t *matched_rules) {
int node = blockIdx.x * THREAD_COUNT + threadIdx.x;
matched_rules[node] = stylesheet[hash_device(ids[node], 12345) % MAX_ID];
}
int get_cuda_device(bool cpu) {
int device_count;
checkCudaErrors(cudaGetDeviceCount(&device_count));
for (int device = 0; device < device_count; device++) {
cudaDeviceProp device_props;
cudaGetDeviceProperties(&device_props, device);
if (device_props.computeMode == cudaComputeModeProhibited)
continue;
fprintf(stderr, "found device: %s\n", device_props.name);
return device;
}
fprintf(stderr, "no device found\n");
return 1;
}
int main(int argc, char **argv) {
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
int device_id = get_cuda_device(false);
checkCudaErrors(cudaSetDevice(device_id));
srand(time(NULL));
// Create the stylesheet on the host.
uint16_t *host_stylesheet = (uint16_t *)malloc(MAX_ID);
for (int i = 0; i < MAX_ID; i++)
host_stylesheet[i] = rand();
// Create the DOM on the host.
int *host_ids = (int *)malloc(sizeof(int) * NODE_COUNT);
create_dom(host_ids);
// Allocate the IDs and copy over.
int *device_ids;
checkCudaErrors(cudaMalloc((void **)&device_ids, sizeof(int) * NODE_COUNT));
checkCudaErrors(cudaMemcpy(device_ids,
host_ids,
sizeof(int) * NODE_COUNT,
cudaMemcpyHostToDevice));
// Allocate the stylesheet and copy over.
uint16_t *device_stylesheet;
checkCudaErrors(cudaMalloc((void **)&device_stylesheet, sizeof(uint16_t) * MAX_ID));
checkCudaErrors(cudaMemcpy(device_stylesheet,
host_stylesheet,
sizeof(uint16_t) * MAX_ID,
cudaMemcpyHostToDevice));
// Allocate the matched rules.
uint16_t *device_matched_rules;
uint16_t *device_matched_rules_host_mirror = (uint16_t *)malloc(sizeof(uint16_t) * NODE_COUNT);
checkCudaErrors(cudaMalloc((uint16_t **)&device_matched_rules, sizeof(uint16_t) * NODE_COUNT));
// Create start/stop events.
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Execute the kernel on the GPU.
checkCudaErrors(cudaEventRecord(start));
match_selectors_device<<<NODE_COUNT / THREAD_COUNT, THREAD_COUNT>>>(device_ids,
device_stylesheet,
device_matched_rules);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
float gpu_elapsed = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&gpu_elapsed, start, stop));
fprintf(stderr, "Elapsed time: %f ms\n", gpu_elapsed);
for (int i = 0; i <
checkCudaErrors(cudaFree(device_matched_rules));
checkCudaErrors(cudaFree(device_stylesheet));
checkCudaErrors(cudaFree(device_ids));
cudaDeviceReset();
return 0;
}
|
7d611d58c28583f06dd440ff9c8a6e89d278ee20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
* Copyright (C) 2013 x265 project
*
* Authors: Gopu Govindaswamy <[email protected]>
* Mandar Gurav <[email protected]>
* Mahesh Pittala <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at [email protected].
*****************************************************************************/
#include "ece408_competition.h"
#include "primitives.h"
#include "test/intrapredharness.h"
#include "cpu.h"
#include "TLibCommon/TComRom.h"
#include "TLibEncoder/TEncCfg.h"
#include "input/input.h"
#include "output/output.h"
#include "common.h"
#include "x265.h"
#include "getopt.h"
#include "PPA/ppa.h"
#include "encoder.h"
#include "TLibCommon/TComYuv.h"
#include "TLibCommon/TComPic.h"
#include "TLibCommon/TComPicYuv.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <fstream>
#include <algorithm>
#include "kernel.hip"
//Define this to verify the student intra prediction against the reference version
#define VERIFY
//#define VERBOSE
//Define this to dump all reference results to file (to compare between versions)
//#define DUMP_TO_FILE
//This is the filename where all reference results will be dumped if DUMP_TO_FILE is defined
#define DUMP_FILE "dump.bin"
using namespace x265;
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames);
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames);
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames);
Pel *refAbove1, *refAbove2, *refLeft1, *refLeft2;
Pel* predBuf;
int predBufStride;
int predBufHeight;
TComYuv pred_yuv;
TComYuv orig_yuv;
TComSPS sps;
TComPPS pps;
x265_param *param;
ALIGN_VAR_32(Pel, tmp[33 * 32 * 32]);
ALIGN_VAR_32(Pel, buf_trans[32 * 32]);
static const char short_options[] = "o:f:F:r:i:b:s:q:m:hwV";
static const struct option long_options[] =
{
#if HIGH_BIT_DEPTH
{ "depth", required_argument, NULL, 0 },
#endif
{ "help", no_argument, NULL, 'h' },
{ "version", no_argument, NULL, 'V' },
{ "cpuid", required_argument, NULL, 0 },
{ "threads", required_argument, NULL, 0 },
{ "preset", required_argument, NULL, 'p' },
{ "tune", required_argument, NULL, 't' },
{ "frame-threads", required_argument, NULL, 'F' },
{ "log", required_argument, NULL, 0 },
{ "csv", required_argument, NULL, 0 },
{ "y4m", no_argument, NULL, 0 },
{ "no-progress", no_argument, NULL, 0 },
{ "output", required_argument, NULL, 'o' },
{ "input", required_argument, NULL, 0 },
{ "input-depth", required_argument, NULL, 0 },
{ "input-res", required_argument, NULL, 0 },
{ "input-csp", required_argument, NULL, 0 },
{ "fps", required_argument, NULL, 0 },
{ "frame-skip", required_argument, NULL, 0 },
{ "frames", required_argument, NULL, 'f' },
{ "recon", required_argument, NULL, 'r' },
{ "recon-depth", required_argument, NULL, 0 },
{ "no-wpp", no_argument, NULL, 0 },
{ "wpp", no_argument, NULL, 0 },
{ "ctu", required_argument, NULL, 's' },
{ "tu-intra-depth", required_argument, NULL, 0 },
{ "tu-inter-depth", required_argument, NULL, 0 },
{ "me", required_argument, NULL, 0 },
{ "subme", required_argument, NULL, 'm' },
{ "merange", required_argument, NULL, 0 },
{ "max-merge", required_argument, NULL, 0 },
{ "rdpenalty", required_argument, NULL, 0 },
{ "no-rect", no_argument, NULL, 0 },
{ "rect", no_argument, NULL, 0 },
{ "no-amp", no_argument, NULL, 0 },
{ "amp", no_argument, NULL, 0 },
{ "no-early-skip", no_argument, NULL, 0 },
{ "early-skip", no_argument, NULL, 0 },
{ "no-fast-cbf", no_argument, NULL, 0 },
{ "fast-cbf", no_argument, NULL, 0 },
{ "no-tskip", no_argument, NULL, 0 },
{ "tskip", no_argument, NULL, 0 },
{ "no-tskip-fast", no_argument, NULL, 0 },
{ "tskip-fast", no_argument, NULL, 0 },
{ "no-constrained-intra", no_argument, NULL, 0 },
{ "constrained-intra", no_argument, NULL, 0 },
{ "refresh", required_argument, NULL, 0 },
{ "keyint", required_argument, NULL, 'i' },
{ "rc-lookahead", required_argument, NULL, 0 },
{ "bframes", required_argument, NULL, 'b' },
{ "bframe-bias", required_argument, NULL, 0 },
{ "b-adapt", required_argument, NULL, 0 },
{ "no-b-pyramid", no_argument, NULL, 0 },
{ "b-pyramid", no_argument, NULL, 0 },
{ "ref", required_argument, NULL, 0 },
{ "no-weightp", no_argument, NULL, 0 },
{ "weightp", no_argument, NULL, 'w' },
{ "crf", required_argument, NULL, 0 },
{ "vbv-maxrate", required_argument, NULL, 0 },
{ "vbv-bufsize", required_argument, NULL, 0 },
{ "vbv-init", required_argument, NULL, 0 },
{ "bitrate", required_argument, NULL, 0 },
{ "qp", required_argument, NULL, 'q' },
{ "aq-mode", required_argument, NULL, 0 },
{ "aq-strength", required_argument, NULL, 0 },
{ "cbqpoffs", required_argument, NULL, 0 },
{ "crqpoffs", required_argument, NULL, 0 },
{ "rd", required_argument, NULL, 0 },
{ "no-signhide", no_argument, NULL, 0 },
{ "signhide", no_argument, NULL, 0 },
{ "no-lft", no_argument, NULL, 0 },
{ "lft", no_argument, NULL, 0 },
{ "no-sao", no_argument, NULL, 0 },
{ "sao", no_argument, NULL, 0 },
{ "sao-lcu-bounds", required_argument, NULL, 0 },
{ "sao-lcu-opt", required_argument, NULL, 0 },
{ "no-ssim", no_argument, NULL, 0 },
{ "ssim", no_argument, NULL, 0 },
{ "no-psnr", no_argument, NULL, 0 },
{ "psnr", no_argument, NULL, 0 },
{ "hash", required_argument, NULL, 0 },
{ "no-strong-intra-smoothing", no_argument, NULL, 0 },
{ "strong-intra-smoothing", no_argument, NULL, 0 },
{ 0, 0, 0, 0 }
};
struct CLIOptions
{
Input* input;
Output* recon;
std::fstream bitstreamFile;
bool bProgress;
bool bForceY4m;
uint32_t totalbytes;
uint32_t frameSkip; // number of frames to skip from the beginning
uint32_t framesToBeEncoded; // number of frames to encode
int64_t startTime;
int64_t prevUpdateTime;
/* in microseconds */
static const int UPDATE_INTERVAL = 250000;
CLIOptions()
{
input = NULL;
recon = NULL;
framesToBeEncoded = frameSkip = totalbytes = 0;
bProgress = true;
bForceY4m = false;
startTime = x265_mdate();
prevUpdateTime = 0;
}
void destroy();
void writeNALs(const x265_nal* nal, uint32_t nalcount);
void printVersion(x265_param *par);
void showHelp(x265_param *par);
bool parse(int argc, char **argv, x265_param* par);
};
void CLIOptions::destroy()
{
if (input)
input->release();
input = NULL;
if (recon)
recon->release();
recon = NULL;
}
void CLIOptions::writeNALs(const x265_nal* nal, uint32_t nalcount)
{
PPAScopeEvent(bitstream_write);
for (uint32_t i = 0; i < nalcount; i++)
{
bitstreamFile.write((const char*)nal->payload, nal->sizeBytes);
totalbytes += nal->sizeBytes;
nal++;
}
}
void CLIOptions::printVersion(x265_param *par)
{
fprintf(stderr, "x265 [info]: HEVC encoder version %s\n", x265_version_str);
fprintf(stderr, "x265 [info]: build info %s\n", x265_build_info_str);
x265_setup_primitives(par, -1);
}
void CLIOptions::showHelp(x265_param *par)
{
x265_param_default(par);
printVersion(par);
#define H0 printf
#define OPT(value) (value ? "enabled" : "disabled")
H0("\nSyntax: x265 [options] infile [-o] outfile\n");
H0(" infile can be YUV or Y4M\n");
H0(" outfile is raw HEVC bitstream\n");
H0("\nExecutable Options:\n");
H0("-h/--h Show this help text and exit\n");
H0("-V/--version Show version info and exit\n");
H0(" --cpuid Limit SIMD capability bitmap 0:auto 1:None. Default:0\n");
H0(" --threads Number of threads for thread pool (0: detect CPU core count, default)\n");
H0("-p/--preset ultrafast, veryfast, faster, fast, medium, slow, slower, veryslow, or placebo\n");
H0("-t/--tune Tune the settings for a particular type of source or situation\n");
H0("-F/--frame-threads Number of concurrently encoded frames. Default %d\n", par->frameNumThreads);
H0(" --log Logging level 0:ERROR 1:WARNING 2:INFO 3:DEBUG -1:NONE. Default %d\n", par->logLevel);
H0(" --csv Comma separated log file, log level >= 3 frame log, else one line per run\n");
H0(" --y4m Parse input stream as YUV4MPEG2 regardless of file extension\n");
H0(" --no-progress Disable CLI progress reports\n");
H0("-o/--output Bitstream output file name\n");
H0("\nInput Options:\n");
H0(" --input Raw YUV or Y4M input file name\n");
H0(" --input-depth Bit-depth of input file (YUV only) Default %d\n", par->inputBitDepth);
H0(" --input-res Source picture size [w x h], auto-detected if Y4M\n");
H0(" --input-csp Source color space parameter, auto-detected if Y4M\n");
H0(" --fps Source frame rate, auto-detected if Y4M\n");
H0(" --frame-skip Number of frames to skip at start of input file\n");
H0("-f/--frames Number of frames to be encoded. Default all\n");
H0("\nQuad-Tree analysis:\n");
H0(" --[no-]wpp Enable Wavefront Parallel Processing. Default %s\n", OPT(par->bEnableWavefront));
H0("-s/--ctu Maximum CU size. Default %dx%d\n", par->maxCUSize, par->maxCUSize);
H0(" --tu-intra-depth Max TU recursive depth for intra CUs. Default %d\n", par->tuQTMaxIntraDepth);
H0(" --tu-inter-depth Max TU recursive depth for inter CUs. Default %d\n", par->tuQTMaxInterDepth);
H0("\nTemporal / motion search options:\n");
H0(" --me Motion search method 0:dia 1:hex 2:umh 3:star 4:full. Default %d\n", par->searchMethod);
H0("-m/--subme Amount of subpel refinement to perform (0:least .. 7:most). Default %d \n", par->subpelRefine);
H0(" --merange Motion search range. Default %d\n", par->searchRange);
H0(" --[no-]rect Enable rectangular motion partitions Nx2N and 2NxN. Default %s\n", OPT(par->bEnableRectInter));
H0(" --[no-]amp Enable asymmetric motion partitions, requires --rect. Default %s\n", OPT(par->bEnableAMP));
H0(" --max-merge Maximum number of merge candidates. Default %d\n", par->maxNumMergeCand);
H0(" --[no-]early-skip Enable early SKIP detection. Default %s\n", OPT(par->bEnableEarlySkip));
H0(" --[no-]fast-cbf Enable Cbf fast mode \n \t\t\t\t Default : %s\n", OPT(par->bEnableCbfFastMode));
H0("\nSpatial / intra options:\n");
H0(" --rdpenalty penalty for 32x32 intra TU in non-I slices. 0:disabled 1:RD-penalty 2:maximum. Default %d\n", par->rdPenalty);
H0(" --[no-]tskip Enable intra transform skipping. Default %s\n", OPT(par->bEnableTransformSkip));
H0(" --[no-]tskip-fast Enable fast intra transform skipping. Default %s\n", OPT(par->bEnableTSkipFast));
H0(" --[no-]strong-intra-smoothing Enable strong intra smoothing for 32x32 blocks. Default %s\n", OPT(par->bEnableStrongIntraSmoothing));
H0(" --[no-]constrained-intra Constrained intra prediction (use only intra coded reference pixels) Default %s\n", OPT(par->bEnableConstrainedIntra));
H0("\nSlice decision options:\n");
H0(" --refresh Intra refresh type - 0:none, 1:CDR, 2:IDR (default: CDR) Default %d\n", par->decodingRefreshType);
H0("-i/--keyint Max intra period in frames. Default %d\n", par->keyframeMax);
H0(" --rc-lookahead Number of frames for frame-type lookahead (determines encoder latency) Default %d\n", par->lookaheadDepth);
H0(" --bframes Maximum number of consecutive b-frames (now it only enables B GOP structure) Default %d\n", par->bframes);
H0(" --bframe-bias Bias towards B frame decisions. Default %d\n", par->bFrameBias);
H0(" --b-adapt 0 - none, 1 - fast, 2 - full (trellis) adaptive B frame scheduling. Default %d\n", par->bFrameAdaptive);
H0(" --[no-]b-pyramid Use B-frames as references. Default %s\n", OPT(par->bBPyramid));
H0(" --ref max number of L0 references to be allowed (1 .. 16) Default %d\n", par->maxNumReferences);
H0("-w/--[no-]weightp Enable weighted prediction in P slices. Default %s\n", OPT(par->bEnableWeightedPred));
H0("\nQP, rate control and rate distortion options:\n");
H0(" --bitrate Target bitrate (kbps), implies ABR. Default %d\n", par->rc.bitrate);
H0(" --crf Quality-based VBR (0-51). Default %f\n", par->rc.rfConstant);
H0(" --vbv-maxrate Max local bitrate (kbit/s). Default %d\n", par->rc.vbvMaxBitrate);
H0(" --vbv-bufsize Set size of the VBV buffer (kbit). Default %d\n", par->rc.vbvBufferSize);
H0(" --vbv-init Initial VBV buffer occupancy. Default %f\n", par->rc.vbvBufferInit);
H0("-q/--qp Base QP for CQP mode. Default %d\n", par->rc.qp);
H0(" --aq-mode Mode for Adaptive Quantization - 0:none 1:aqVariance Default %d\n", par->rc.aqMode);
H0(" --aq-strength Reduces blocking and blurring in flat and textured areas.(0 to 3.0)<double> . Default %f\n", par->rc.aqStrength);
H0(" --cbqpoffs Chroma Cb QP Offset. Default %d\n", par->cbQpOffset);
H0(" --crqpoffs Chroma Cr QP Offset. Default %d\n", par->crQpOffset);
H0(" --rd Level of RD in mode decision 0:least....2:full RDO. Default %d\n", par->rdLevel);
H0(" --[no-]signhide Hide sign bit of one coeff per TU (rdo). Default %s\n", OPT(par->bEnableSignHiding));
H0("\nLoop filter:\n");
H0(" --[no-]lft Enable Loop Filter. Default %s\n", OPT(par->bEnableLoopFilter));
H0("\nSample Adaptive Offset loop filter:\n");
H0(" --[no-]sao Enable Sample Adaptive Offset. Default %s\n", OPT(par->bEnableSAO));
H0(" --sao-lcu-bounds 0: right/bottom boundary areas skipped 1: non-deblocked pixels are used. Default %d\n", par->saoLcuBoundary);
H0(" --sao-lcu-opt 0: SAO picture-based optimization, 1: SAO LCU-based optimization. Default %d\n", par->saoLcuBasedOptimization);
H0("\nQuality reporting metrics:\n");
H0(" --[no-]ssim Enable reporting SSIM metric scores. Default %s\n", OPT(par->bEnableSsim));
H0(" --[no-]psnr Enable reporting PSNR metric scores. Default %s\n", OPT(par->bEnablePsnr));
H0("\nReconstructed video options (debugging):\n");
H0("-r/--recon Reconstructed raw image YUV or Y4M output file name\n");
H0(" --recon-depth Bit-depth of reconstructed raw image file. Default 8\n");
H0("\nSEI options:\n");
H0(" --hash Decoded Picture Hash SEI 0: disabled, 1: MD5, 2: CRC, 3: Checksum. Default %d\n", par->decodedPictureHashSEI);
#undef OPT
#undef H0
exit(0);
}
bool CLIOptions::parse(int argc, char **argv, x265_param* par)
{
int berror = 0;
int help = 0;
int cpuid = 0;
int reconFileBitDepth = 0;
const char *inputfn = NULL;
const char *reconfn = NULL;
const char *bitstreamfn = NULL;
const char *inputRes = NULL;
const char *preset = "medium";
const char *tune = "psnr";
/* Presets are applied before all other options. */
for (optind = 0;; )
{
int c = getopt_long(argc, argv, short_options, long_options, NULL);
if (c == -1)
break;
if (c == 'p')
preset = optarg;
if (c == 't')
tune = optarg;
else if (c == '?')
return true;
}
if (x265_param_default_preset(param, preset, tune) < 0)
{
x265_log(NULL, X265_LOG_WARNING, "preset or tune unrecognized\n");
return true;
}
//MRJ Set max CU size to 32x32 so that frames are padded in Encoder::configure() to a multiple of 4x4, not a multiple of 8x8.
par->maxCUSize = 32;
for (optind = 0;; )
{
int long_options_index = -1;
int c = getopt_long(argc, argv, short_options, long_options, &long_options_index);
if (c == -1)
{
break;
}
switch (c)
{
case 'h':
showHelp(par);
break;
case 'V':
printVersion(par);
exit(0);
default:
if (long_options_index < 0 && c > 0)
{
for (size_t i = 0; i < sizeof(long_options) / sizeof(long_options[0]); i++)
{
if (long_options[i].val == c)
{
long_options_index = (int)i;
break;
}
}
if (long_options_index < 0)
{
/* getopt_long might have already printed an error message */
if (c != 63)
x265_log(NULL, X265_LOG_WARNING, "internal error: short option '%c' has no long option\n", c);
return true;
}
}
if (long_options_index < 0)
{
x265_log(NULL, X265_LOG_WARNING, "short option '%c' unrecognized\n", c);
return true;
}
#define OPT(longname) \
else if (!strcmp(long_options[long_options_index].name, longname))
if (0) ;
OPT("cpuid") cpuid = atoi(optarg);
OPT("frames") this->framesToBeEncoded = (uint32_t)atoi(optarg);
OPT("preset") preset = optarg;
OPT("tune") tune = optarg;
OPT("no-progress") this->bProgress = false;
OPT("frame-skip") this->frameSkip = (uint32_t)atoi(optarg);
OPT("output") bitstreamfn = optarg;
OPT("input") inputfn = optarg;
OPT("recon") reconfn = optarg;
OPT("input-depth") par->inputBitDepth = (uint32_t)atoi(optarg);
OPT("recon-depth") reconFileBitDepth = (uint32_t)atoi(optarg);
OPT("input-res") inputRes = optarg;
OPT("y4m") bForceY4m = true;
else
berror |= x265_param_parse(par, long_options[long_options_index].name, optarg);
if (berror)
{
const char *name = long_options_index > 0 ? long_options[long_options_index].name : argv[optind - 2];
x265_log(NULL, X265_LOG_ERROR, "invalid argument: %s = %s\n", name, optarg);
return true;
}
#undef OPT
}
}
if (optind < argc && !inputfn)
inputfn = argv[optind++];
if (optind < argc && !bitstreamfn)
bitstreamfn = argv[optind++];
if (optind < argc)
{
x265_log(par, X265_LOG_WARNING, "extra unused command arguments given <%s>\n", argv[optind]);
return true;
}
if (argc <= 1 || help)
showHelp(par);
if (inputfn == NULL || bitstreamfn == NULL)
{
x265_log(par, X265_LOG_ERROR, "input or output file not specified, try -V for help\n");
return true;
}
this->input = Input::open(inputfn, par->inputBitDepth, bForceY4m);
if (!this->input || this->input->isFail())
{
x265_log(par, X265_LOG_ERROR, "unable to open input file <%s>\n", inputfn);
return true;
}
if (this->input->getWidth())
{
/* parse the width, height, frame rate from the y4m file */
par->internalCsp = this->input->getColorSpace();
par->sourceWidth = this->input->getWidth();
par->sourceHeight = this->input->getHeight();
par->frameRate = (int)this->input->getRate();
}
else if (inputRes)
{
this->input->setColorSpace(par->internalCsp);
sscanf(inputRes, "%dx%d", &par->sourceWidth, &par->sourceHeight);
this->input->setDimensions(par->sourceWidth, par->sourceHeight);
this->input->setBitDepth(par->inputBitDepth);
}
else if (par->sourceHeight <= 0 || par->sourceWidth <= 0 || par->frameRate <= 0)
{
x265_log(par, X265_LOG_ERROR, "YUV input requires source width, height, and rate to be specified\n");
return true;
}
else
{
this->input->setDimensions(par->sourceWidth, par->sourceHeight);
this->input->setBitDepth(par->inputBitDepth);
}
int guess = this->input->guessFrameCount();
if (this->frameSkip)
{
this->input->skipFrames(this->frameSkip);
}
uint32_t fileFrameCount = guess < 0 ? 0 : (uint32_t)guess;
if (this->framesToBeEncoded && fileFrameCount)
this->framesToBeEncoded = X265_MIN(this->framesToBeEncoded, fileFrameCount - this->frameSkip);
else if (fileFrameCount)
this->framesToBeEncoded = fileFrameCount - this->frameSkip;
if (par->logLevel >= X265_LOG_INFO)
{
if (this->framesToBeEncoded == 0)
fprintf(stderr, "%s [info]: %dx%d %dHz %s, unknown frame count\n", input->getName(),
par->sourceWidth, par->sourceHeight, par->frameRate,
(par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420");
else
fprintf(stderr, "%s [info]: %dx%d %dHz %s, frames %u - %d of %d\n", input->getName(),
par->sourceWidth, par->sourceHeight, par->frameRate,
(par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420",
this->frameSkip, this->frameSkip + this->framesToBeEncoded - 1, fileFrameCount);
}
this->input->startReader();
if (reconfn)
{
if (reconFileBitDepth == 0)
reconFileBitDepth = par->inputBitDepth;
this->recon = Output::open(reconfn, par->sourceWidth, par->sourceHeight, reconFileBitDepth, par->frameRate, par->internalCsp);
if (this->recon->isFail())
{
x265_log(par, X265_LOG_WARNING, "unable to write reconstruction file\n");
this->recon->release();
this->recon = 0;
}
}
#if HIGH_BIT_DEPTH
if (par->inputBitDepth != 12 && par->inputBitDepth != 10 && par->inputBitDepth != 8)
{
x265_log(par, X265_LOG_ERROR, "Only bit depths of 8, 10, or 12 are supported\n");
return true;
}
#else
if (par->inputBitDepth != 8)
{
x265_log(par, X265_LOG_ERROR, "not compiled for bit depths greater than 8\n");
return true;
}
#endif // if HIGH_BIT_DEPTH
this->bitstreamFile.open(bitstreamfn, std::fstream::binary | std::fstream::out);
if (!this->bitstreamFile)
{
x265_log(NULL, X265_LOG_ERROR, "failed to open bitstream file <%s> for writing\n", bitstreamfn);
return true;
}
x265_setup_primitives(par, cpuid);
printVersion(par);
return false;
}
int main(int argc, char *argv[])
{
CLIOptions cliopt;
param = x265_param_alloc();
if (cliopt.parse(argc, argv, param))
{
cliopt.destroy();
exit(1);
}
param->bEnableStrongIntraSmoothing = false; //No strong intra smoothing for competition
TEncCfg *encoder = new TEncCfg();
if (!encoder)
{
x265_log(param, X265_LOG_ERROR, "failed to open encoder\n");
cliopt.destroy();
x265_cleanup();
exit(1);
}
// save a copy of final parameters in TEncCfg
memcpy(&encoder->param, param, sizeof(*param));
encoder->m_pad[0] = encoder->m_pad[1] = 0;
//MRJ the above (original) line always computes 8, let's set it to 4 instead to get the correct padding.
uint32_t minCUDepth = 4;
if ((param->sourceWidth % minCUDepth) != 0)
{
uint32_t padsize = 0;
uint32_t rem = param->sourceWidth % minCUDepth;
padsize = minCUDepth - rem;
param->sourceWidth += padsize;
encoder->m_pad[0] = padsize; //pad width
/* set the conformance window offsets */
encoder->m_conformanceWindow.m_enabledFlag = true;
encoder->m_conformanceWindow.m_winRightOffset = encoder->m_pad[0];
}
//======== set pad size if height is not multiple of the minimum CU size =========
if ((param->sourceHeight % minCUDepth) != 0)
{
uint32_t padsize = 0;
uint32_t rem = param->sourceHeight % minCUDepth;
padsize = minCUDepth - rem;
param->sourceHeight += padsize;
encoder->m_pad[1] = padsize; //pad height
/* set the conformance window offsets */
encoder->m_conformanceWindow.m_enabledFlag = true;
encoder->m_conformanceWindow.m_winBottomOffset = encoder->m_pad[1];
}
//Encoder *encoder_c = static_cast<Encoder*>(encoder);
//Initialize arrays for storing neighboring pixel values
refAbove1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refAbove2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refLeft1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refLeft2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
//Save globals so we can restore them at the end
//We need to restore the original values before destroy()ing data structures because many of the destroy() functions
//use these globals to determine the size of their arrays
int g_maxCUDepth_bak = g_maxCUDepth;
int g_addCUDepth_bak = g_addCUDepth;
int g_maxCUWidth_bak = g_maxCUWidth;
int g_maxCUHeight_bak = g_maxCUHeight;
g_maxCUDepth = 0; //Disallow recursion to decompose frames into a regular grid of equal size CUs.
g_addCUDepth = 0;
//NOTE: has to be after x265_encoder_open() call, since that calls x265_set_globals(), which resets g_maxCUDepth.
x265_picture pic_orig;
x265_picture *pic_in = &pic_orig;
x265_picture_init(param, pic_in);
uint32_t inFrameCount = 0;
//Several pieces of the reference code assume 4:2:0 subsampling, so assert that here
if(param->internalCsp != X265_CSP_I420) {
fprintf(stderr, "Error: Input must use i420 colorspace (4:2:0 subsampling)\n");
exit(1);
}
#ifdef DUMP_TO_FILE
FILE *f = fopen(DUMP_FILE, "wb");
if(!f) {
fprintf(stderr, "Error opening dump file (" DUMP_FILE ")\n");
exit(1);
}
#endif
while (1)
{
pic_orig.poc = inFrameCount;
if (cliopt.framesToBeEncoded && inFrameCount >= cliopt.framesToBeEncoded)
break;
else if (cliopt.input->readPicture(pic_orig))
inFrameCount++;
else
break;
ece408_intra_pred_result *ref = ece408_competition_ref(encoder, pic_in, 1);
#ifdef DUMP_TO_FILE
ref[0].write_to_file(f);
#endif
ece408_frame frame(param->sourceWidth, param->sourceHeight, pic_in);
//Uncomment this one to run the student version
ece408_intra_pred_result *student = ece408_competition(&frame, 1);
//Uncomment this one instead to run the reference version twice (to test the compare function)
//ece408_intra_pred_result *student = ece408_competition_ref(encoder, pic_in, 1);
#ifdef VERIFY
if(!ece408_compare(ref, student, 1)) {
printf("Error in frame %d\n", inFrameCount);
exit(1);
}
#endif
for(int i = 0; i < 4*1; i++) {
ref[i].destroy();
student[i].destroy();
}
delete[] ref;
delete[] student;
}
#ifdef DUMP_TO_FILE
fclose(f);
#endif
#ifdef VERIFY
printf("Success!\n");
#endif
//Restore globals
g_maxCUDepth = g_maxCUDepth_bak;
g_addCUDepth = g_addCUDepth_bak;
g_maxCUWidth = g_maxCUWidth_bak;
g_maxCUHeight = g_maxCUHeight_bak;
delete encoder;
X265_FREE(refAbove1);
X265_FREE(refAbove2);
X265_FREE(refLeft1);
X265_FREE(refLeft2);
orig_yuv.destroy();
pred_yuv.destroy();
x265_cleanup(); /* Free library singletons */
cliopt.destroy();
x265_param_free(param);
return 0;
}
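// Evaluate all 35 HEVC intra modes (planar, DC, 33 angular) for one channel of
// the cached CU: predictions come from the x265 intra_pred / intra_pred_allangs
// primitives and each one is scored against the original block with the sa8d
// (Hadamard SATD) primitive. The 35 costs are written to sad_ptr in mode order.
// Horizontal-ish angular modes (2-17) are compared against a transposed copy of
// the original block, matching how intra_pred_allangs lays out its output.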
//channel = 0 for luma, 1 for cb, 2 for cr
void ece408_intra_pred_channel(int luma_size, int channel, int32_t *sad_ptr) {
//#define VERBOSE
#ifdef VERBOSE
printf("refAbove1: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refAbove1[i]);
printf("\n");
printf("refAbove2: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refAbove2[i]);
printf("\n");
printf("refLeft1: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refLeft1[i]);
printf("\n");
printf("refLeft2: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refLeft2[i]);
printf("\n");
#endif
int chroma_size = luma_size >> 1;
bool luma = (channel == 0);
bool cb = (channel == 1);
bool cr = (channel == 2);
int size = luma ? luma_size : chroma_size;
Pel* orig_pel = luma ? orig_yuv.getLumaAddr(0, size) : (cb ? orig_yuv.getCbAddr(0, size) : orig_yuv.getCrAddr(0, size));
Pel* pred_pel = luma ? pred_yuv.getLumaAddr(0, size) : (cb ? pred_yuv.getCbAddr(0, size) : pred_yuv.getCrAddr(0, size));
uint32_t stride = luma ? pred_yuv.getStride() : pred_yuv.getCStride();
Pel *pAboveUnfilt = (cr ? refAbove2 : refAbove1) + size - 1;
Pel *pAboveFilt = luma ? (refAbove2 + size - 1) : pAboveUnfilt;
Pel *pLeftUnfilt = (cr ? refLeft2 : refLeft1) + size - 1;
Pel *pLeftFilt = luma ? (refLeft2 + size - 1) : pLeftUnfilt;
int nLog2SizeMinus2 = g_convertToBit[size];
pixelcmp_t sa8d = primitives.sa8d[nLog2SizeMinus2];
#ifdef VERBOSE
printf("Channel %d Orig:\n", channel);
for(int row = 0; row < size; row++) {
for(int col = 0; col < size; col++) {
printf("%02X ", orig_pel[row*size + col]);
}
printf("\n");
}
#endif
int sad;
Pel *above = (luma && size >= 8) ? pAboveFilt : pAboveUnfilt;
Pel *left = (luma && size >= 8) ? pLeftFilt : pLeftUnfilt;
//TODO check to make sure we're filtering in all the right conditions
primitives.intra_pred[nLog2SizeMinus2][0](pred_pel, stride, left, above, /*dummy dirMode argument*/ 0, /*dummy filter argument*/ 0);
sad = sa8d(orig_pel, stride, pred_pel, stride);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("Planar SATD = %d\n", sad);
#endif
//TODO check to make sure we're filtering in all the right conditions
//DC (mode 1)
primitives.intra_pred[nLog2SizeMinus2][1](pred_pel, stride, pLeftUnfilt, pAboveUnfilt, /*dummy dirMode argument*/ 1, (luma && size <= 16));
sad = sa8d(orig_pel, stride, pred_pel, stride);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("Size = %d, stride = %d, DC:\n", size, stride);
for(int row = 0; row < size; row++) {
for(int col = 0; col < size; col++) {
printf("%02X ", pred_pel[row*size+col]);
}
printf("\n");
}
printf("SATD = %d\n", sad);
#endif
primitives.transpose[nLog2SizeMinus2](buf_trans, orig_pel, stride);
//TODO check to make sure we're filtering in all the right conditions
primitives.intra_pred_allangs[nLog2SizeMinus2](tmp, pAboveUnfilt, pLeftUnfilt, pAboveFilt, pLeftFilt, (luma && (size <= 16)));
#ifdef VERBOSE
printf("Angular SATD = ", channel);
#endif
for (int mode = 2; mode < 35; mode++)
{
bool modeHor = (mode < 18);
Pel *cmp = (modeHor ? buf_trans : orig_pel);
intptr_t srcStride = (modeHor ? size : stride);
#ifdef VERBOSE
printf("Pred mode %d\n", mode);
for(int r = 0; r < size; r++) {
for(int c = 0; c < size; c++)
printf("%02X ", tmp[(mode-2) * (size * size) + r * size + c]);
printf("\n");
}
#endif
sad = sa8d(cmp, srcStride, &tmp[(mode - 2) * (size * size)], size);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("%d, ", sad);
#endif
}
#ifdef VERBOSE
printf("\n");
#endif
}
//#undef VERBOSE
inline bool isAvailable(int frameWidth, int frameHeight, int r, int c) {
return (r >= 0 && c >= 0 && r < frameHeight && c < frameWidth);
}
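// Gather the 2*cuWidth+1 neighboring reference samples of a CU (index 0 is the
// shared top-left corner). Samples that fall outside the frame are substituted
// by propagating the nearest available neighbor along the bottom-left ->
// top-left -> top-right scan, or by the mid-grey value 1 << (BIT_DEPTH - 1) if
// no neighbor is available at all. For luma a [1 2 1]-filtered copy of the
// references is produced as well (strong intra smoothing is disabled here).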
//Channel is 0 for luma, 1 for Cb, 2 for Cr
void getReferencePixels(x265_picture *pic, unsigned int width, unsigned int height, unsigned int luma_size, unsigned int cu_index, Pel* refAbove, Pel* refLeft, Pel* refAboveFlt, Pel* refLeftFlt, int channel) {
uint32_t cuWidth = (channel == 0) ? luma_size : (luma_size / 2);
uint32_t cuWidth2 = cuWidth << 1;
uint32_t frameWidth = (channel == 0) ? width : (width / 2);
uint32_t frameHeight = (channel == 0) ? height : (height / 2);
uint32_t frameStride = pic->stride[channel];
//Base address of the array containing the required color component of the reconstructed image (equivalent to the original image for the ECE408 competition)
Pel *baseAddress = (Pel *)pic->planes[channel];
int32_t topLeftR = (cu_index / (frameWidth / cuWidth)) * cuWidth;
int32_t topLeftC = (cu_index % (frameWidth / cuWidth)) * cuWidth;
//Find value for bottom-left neighbor
//Search left from bottom to top
bool bottomLeftFound = false;
for(int32_t neighborR = (topLeftR + cuWidth2 - 1), neighborC = (topLeftC - 1); neighborR >= (topLeftR - 1); neighborR--)
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
bottomLeftFound = true;
refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
//printf("Bottom left found on left (%d, %d) %d\n", neighborR, neighborC, refLeft[cuWidth2+1]);
break;
}
//If not found, search top from left to right
if(!bottomLeftFound) {
for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
bottomLeftFound = true;
refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
//printf("Bottom left found on top (%d, %d) %d \n", neighborR, neighborC, refLeft[cuWidth2+1]);
break;
}
}
}
//If still not found, no reference samples are available, so assign 50% value to all neighbors
if(!bottomLeftFound) {
refLeft[cuWidth2] = 1 << (BIT_DEPTH - 1);
//printf("Bottom left not found, using DC value %d\n", refLeft[cuWidth2]);
}
//Traverse bottom-left to top-left to top-right. If a pixel is not available, use the one before it (one below or to the left)
for(int32_t neighborR = (topLeftR + cuWidth2 - 2), neighborC = (topLeftC - 1), idx = cuWidth2 - 1; neighborR >= (topLeftR - 1); neighborR--, idx--) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
refLeft[idx] = baseAddress[neighborR*frameStride + neighborC];
//printf("Left[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
}
else {
refLeft[idx] = refLeft[idx+1];
//printf("Left[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
}
}
//Include the top-left corner in both refLeft and refAbove
refAbove[0] = refLeft[0];
for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC, idx = 1; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++, idx++) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
refAbove[idx] = baseAddress[neighborR*frameStride + neighborC];
//printf("Above[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
}
else {
refAbove[idx] = refAbove[idx-1];
//printf("Above[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
}
}
//Make filtered version (for luma only)
if(channel == 0) {
//Special cases for the corner, bottom, and right pixels, [1 2 1] FIR filter for the rest
//pF[ 1 ][ 1 ] = ( p[ 1 ][ 0 ] + 2 * p[ 1 ][ 1 ] + p[ 0 ][ 1 ] + 2 ) >> 2
refLeftFlt[0] = refAboveFlt[0] = (refLeft[1] + 2 * refLeft[0] + refAbove[1] + 2) >> 2;
for(uint32_t idx = 1; idx < cuWidth2; idx++) {
refLeftFlt[idx] = (refLeft[idx-1] + 2 * refLeft[idx] + refLeft[idx+1] + 2) >> 2;
refAboveFlt[idx] = (refAbove[idx-1] + 2 * refAbove[idx] + refAbove[idx+1] + 2) >> 2;
}
refLeftFlt[cuWidth2] = refLeft[cuWidth2];
refAboveFlt[cuWidth2] = refAbove[cuWidth2];
}
}
//luma_size is the (square) block size of luma blocks, chroma blocks are assumed (luma_size/2)x(luma_size/2)
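// Copy the CU's Y (and, for luma_size > 4, Cb/Cr) samples into orig_yuv, build
// the unfiltered/filtered neighbor arrays with getReferencePixels (offset by
// size-1 so the angular predictor can extend the references into the unused
// space at the start of each array), and run the 35-mode SATD search per
// channel. 4x4 CUs skip chroma, since the corresponding chroma block is 2x2.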
void ece408_intra_pred(x265_picture *pic, int width, int height, int luma_size, unsigned int cu_index, int32_t *y_ptr, int32_t *cb_ptr, int32_t *cr_ptr) {
unsigned int luma_r = (cu_index / (width / luma_size)) * luma_size;
unsigned int luma_c = (cu_index % (width / luma_size)) * luma_size;
//Copy luma bytes into orig_yuv
Pel *walker = orig_yuv.getLumaAddr();
for(int i = 0; i < luma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[0]) + (((luma_r + i)*pic->stride[0]) + luma_c), luma_size*sizeof(*walker));
walker += luma_size;
}
if(luma_size > 4) {
//Copy chroma bytes into orig_yuv
unsigned int chroma_r = luma_r / 2;
unsigned int chroma_c = luma_c / 2;
unsigned int chroma_size = luma_size / 2;
walker = orig_yuv.getCbAddr();
for(unsigned int i = 0; i < chroma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[1]) + (((chroma_r + i)*pic->stride[1]) + chroma_c), chroma_size*sizeof(*walker));
walker += chroma_size;
}
walker = orig_yuv.getCrAddr();
for(unsigned int i = 0; i < chroma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[2]) + (((chroma_r + i)*pic->stride[2]) + chroma_c), chroma_size*sizeof(*walker));
walker += chroma_size;
}
}
//Get the unfiltered and filtered reference pixels. Position them (cuWidth-1) elements into their respective arrays so that the
//angular prediction function can use the unused space at the beginning of the array to extend the reference pixels as described
//in equations 8-48 and 8-56 in Section 8.4.4.2.6 of the H.265 standard.
getReferencePixels(pic, width, height, luma_size, cu_index, refAbove1+luma_size-1, refLeft1+luma_size-1, refAbove2+luma_size-1, refLeft2+luma_size-1, /*channel*/ 0);
#ifdef VERBOSE
printf("Above ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refAbove1[i+luma_size-1]);
printf("\nLeft ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refLeft1[i+luma_size-1]);
printf("\nAboveFilt ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refAbove2[i+luma_size-1]);
printf("\nLeftFilt ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refLeft2[i+luma_size-1]);
printf("\n");
#endif
ece408_intra_pred_channel(luma_size, 0, y_ptr);
if(luma_size > 4) { //No 2x2 chroma blocks, and 4x4 chroma blocks are covered with 8x8 luma
getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove1+luma_size/2)-1, refLeft1+(luma_size/2)-1, NULL, NULL, /*channel*/ 1);
ece408_intra_pred_channel(luma_size, 1, cb_ptr);
getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove2+luma_size/2)-1, refLeft2+(luma_size/2)-1, NULL, NULL, /*channel*/ 2);
ece408_intra_pred_channel(luma_size, 2, cr_ptr);
}
}
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames) {
ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
ece408_intra_pred_result *cur_result = ret;
for(int i = 0; i < num_frames; i++) {
for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
cur_result->create(param->sourceWidth, param->sourceHeight, luma_size);
int32_t *y_satd_results = cur_result->y_satd_results;
uint8_t *y_modes = cur_result->y_modes;
int32_t *cb_satd_results = cur_result->cb_satd_results;
uint8_t *cb_modes = cur_result->cb_modes;
int32_t *cr_satd_results = cur_result->cr_satd_results;
uint8_t *cr_modes = cur_result->cr_modes;
orig_yuv.destroy();
orig_yuv.create(luma_size, luma_size, X265_CSP_I420);
pred_yuv.destroy();
pred_yuv.create(luma_size, luma_size, X265_CSP_I420);
for(unsigned int cuIndex = 0; cuIndex < (unsigned int)((encoder->param.sourceWidth/luma_size)*(encoder->param.sourceHeight/luma_size)); cuIndex++) {
ece408_intra_pred(&(pics_in[i]),
encoder->param.sourceWidth,
encoder->param.sourceHeight,
luma_size,
cuIndex,
&(y_satd_results[35*cuIndex]),
&(cb_satd_results[35*cuIndex]),
&(cr_satd_results[35*cuIndex]));
//printf("SATD results: ");
//for(int l = 0; l < 35; l++) {
// printf("(%d, %d, %d, %d) ", l, y_satd_results[35*cuIndex+l], cb_satd_results[35*cuIndex+l], cr_satd_results[35*cuIndex+l]);
//}
//printf("\n");
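//The loop below packs each mode index into the low 8 bits of its 32-bit SATD value so that a single
//std::sort orders candidates by SATD first and by mode number on ties (safe here because SATD values stay
//well below 2^23); the loop after the sort unpacks them again. E.g. SATD 100 / mode 3 becomes
//(100 << 8) | 3 = 25603, which sorts ahead of SATD 100 / mode 7 = 25607.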
for(int mode = 0; mode < 35; mode++) {
y_satd_results[35*cuIndex + mode] = (y_satd_results[35*cuIndex + mode] << 8) | mode;
if(luma_size > 4) {
cb_satd_results[35*cuIndex + mode] = (cb_satd_results[35*cuIndex + mode] << 8) | mode;
cr_satd_results[35*cuIndex + mode] = (cr_satd_results[35*cuIndex + mode] << 8) | mode;
}
}
std::sort(&(y_satd_results[35*cuIndex]), &(y_satd_results[35*cuIndex+35]));
if(luma_size > 4) {
std::sort(&(cb_satd_results[35*cuIndex]), &(cb_satd_results[35*cuIndex+35]));
std::sort(&(cr_satd_results[35*cuIndex]), &(cr_satd_results[35*cuIndex+35]));
}
for(int mode = 0; mode < 35; mode++) {
y_modes[35*cuIndex+mode] = (y_satd_results[35*cuIndex+mode] & 0xFF);
y_satd_results[35*cuIndex+mode] >>= 8;
if(luma_size > 4) {
cb_modes[35*cuIndex+mode] = (cb_satd_results[35*cuIndex+mode] & 0xFF);
cb_satd_results[35*cuIndex+mode] >>= 8;
cr_modes[35*cuIndex+mode] = (cr_satd_results[35*cuIndex+mode] & 0xFF);
cr_satd_results[35*cuIndex+mode] >>= 8;
}
}
}
#ifdef MODE_HIST
int ymode_hist[35], cbmode_hist[35], crmode_hist[35];
for(int l = 0; l < 35; l++) {
ymode_hist[l] = cbmode_hist[l] = crmode_hist[l] = 0;
}
for(int l = 0; l < (35*((param->sourceWidth/luma_size)*(param->sourceHeight/luma_size))); l += 35) { //+= 1 to make sure all modes are accounted for, += 35 for histogram of best modes
ymode_hist[y_modes[l]]++;
if(luma_size > 4) {
cbmode_hist[cb_modes[l]]++;
crmode_hist[cr_modes[l]]++;
}
}
printf("ymode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", ymode_hist[l]);
if(luma_size > 4) {
printf("\ncbmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", cbmode_hist[l]);
printf("\ncrmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", crmode_hist[l]);
}
printf("\n");
#endif
cur_result++;
}
}
return ret;
}
//TODO sort student results by satd result *and* mode number to make sure we have *exactly* the same bytes in
//both arrays, even if several modes have the same SATD value.
//We want to do the sort here so that students are not required to (it's not necessary in a real x265 use case).
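//A minimal sketch of how that per-block joint sort could look, reusing the pack-low-byte trick from the
//reference path above (hypothetical helper, not wired into ece408_compare yet):
static void ece408_sort_block_by_satd_and_mode(int32_t *satd, uint8_t *modes)
{
    for (int m = 0; m < 35; m++)
        satd[m] = (satd[m] << 8) | modes[m]; //mode becomes the tie-breaker in the low byte
    std::sort(satd, satd + 35);              //orders by SATD, then by mode when SATD values are equal
    for (int m = 0; m < 35; m++) {
        modes[m] = (uint8_t)(satd[m] & 0xFF);
        satd[m] >>= 8;
    }
}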
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames) {
if(student == NULL) {
printf("Student result array pointer is NULL\n");
return false;
}
for(int i = 0; i < (4*num_frames); i++) {
int block_offset=35;
int b_s = 0;
for(int idx=0;idx<35;idx++)
{
//printf("\nSERIAL OFFSET: %d\n", block_offset+idx);
printf("Serial code : For mode: %u Ref value:%i\n",ref[b_s].y_modes[block_offset+idx], ref[b_s].y_satd_results[block_offset+idx]);
}
if(ref[i].luma_block_size != student[i].luma_block_size) {
printf("Ref result %d luma block size = %d, student = %d\n", i, ref[i].luma_block_size, student[i].luma_block_size);
return false;
}
if(ref[i].num_blocks != student[i].num_blocks) {
printf("Ref result %d num_blocks = %d, student = %d\n", i, ref[i].num_blocks, student[i].num_blocks);
return false;
}
if(memcmp(ref[i].y_modes, student[i].y_modes, 35*ref[i].num_blocks*sizeof(*ref[i].y_modes))) {
printf("Result %d, ref and student y_modes mismatched\n", i);
return false;
}
if(memcmp(ref[i].y_satd_results, student[i].y_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].y_satd_results))) {
printf("Result %d, ref and student y_satd_results mismatched\n", i);
return false;
}
if(ref[i].luma_block_size > 4) {
if(memcmp(ref[i].cb_modes, student[i].cb_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cb_modes))) {
printf("Result %d, ref and student cb_modes mismatched\n", i);
return false;
}
if(memcmp(ref[i].cb_satd_results, student[i].cb_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cb_satd_results))) {
printf("Result %d, ref and student cb_satd_results mismatched\n", i);
return false;
}
if(memcmp(ref[i].cr_modes, student[i].cr_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cr_modes))) {
printf("Result %d, ref and student cr_modes mismatched\n", i);
return false;
}
if(memcmp(ref[i].cr_satd_results, student[i].cr_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cr_satd_results))) {
printf("Result %d, ref and student cr_satd_results mismatched\n", i);
return false;
}
}
}
return true;
}
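//A hypothetical convenience macro (an assumption, not used by the function below) that would collapse each
//repeated "check cuda_ret, print, exit" block in ece408_competition into a single line, e.g.
//ECE408_HIP_CHECK(hipMalloc((void **)&d_y, y_size));
#define ECE408_HIP_CHECK(call) \
    do { \
        hipError_t e_ = (call); \
        if (e_ != hipSuccess) { \
            printf("\n%s in %s at line %d\n", hipGetErrorString(e_), __FILE__, __LINE__); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)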
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames) {
//Fill in your own!
ece408_frame *imgs1 = imgs;
ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
ece408_intra_pred_result *cur_result = ret;
unsigned int debug_print = ((imgs->height+4-1)/4)*((imgs->width+4-1)/4);
printf("Number of 4x4 blocks in the frame (debug): %u\n", debug_print);
hipError_t cuda_ret;
uint8_t *d_y,
*d_cr,
*d_cb;
unsigned int y_size = ((imgs->width) * (imgs->height)) * sizeof(uint8_t);
printf("\n Y SIZE : %u\n", y_size);
unsigned int cr_size,
cb_size;
// TODO: for I420 input each chroma plane holds y_size/4 samples, so y_size/2 per plane over-allocates;
// confirm how ece408_frame sizes its cb/cr buffers before relying on these copy sizes.
cr_size = cb_size = (y_size/2);
// Allocate global memory for the y, cr, and cb planes of the frame
cuda_ret = hipMalloc((void **) &d_y, y_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMalloc((void **) &d_cr, cr_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMalloc((void **) &d_cb, cb_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipDeviceSynchronize();
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMemcpy(d_y, imgs1->y, y_size, hipMemcpyHostToDevice);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMemcpy(d_cr, imgs1->cr, cr_size, hipMemcpyHostToDevice);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMemcpy(d_cb, imgs1->cb, cb_size, hipMemcpyHostToDevice);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Finished host-to-device copies of the frame planes\n");
cuda_ret = hipDeviceSynchronize();
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
for(int i = 0; i < num_frames; i++) {
int res_count = 0;
//for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
for(int luma_size_shift = 2; luma_size_shift <=3; luma_size_shift++) {
int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
//cur_result->create(32, 32, luma_size);
cur_result->create(imgs1->width, imgs1->height, luma_size);
// Start
int32_t *d_res_y = NULL;
int32_t *d_res_cr = NULL; //chroma buffers stay NULL for 4x4 blocks, which have no chroma results
int32_t *d_res_cb = NULL;
uint8_t *d_y_modes = NULL;
uint8_t *d_cr_modes = NULL;
uint8_t *d_cb_modes = NULL;
//unsigned int y_res_size = (35 * (cur_result->num_blocks));
unsigned int num_blocks = ((imgs->height+luma_size-1)/luma_size)*((imgs->width+luma_size-1)/luma_size);
unsigned int y_res_size = 35*num_blocks*sizeof(int32_t);
unsigned int mode_size = 35*num_blocks*sizeof(uint8_t);
unsigned int cr_res_size,
cb_res_size;
printf("Number of SATD result entries to compute (35 per block): %u\n", (unsigned int)(y_res_size/sizeof(int32_t)));
cr_res_size = cb_res_size = y_res_size;
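//Size check (for concreteness): a 64x48 frame at luma_size = 4 has (64/4)*(48/4) = 192 blocks, so
//y_res_size = 35 * 192 * 4 bytes = 26880 bytes and mode_size = 35 * 192 = 6720 bytes.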
// Allocate the per-block result buffers on the device
cuda_ret = hipMalloc((void **) &d_res_y, y_res_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMalloc((void **) &d_y_modes, mode_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if ( luma_size > 4 )
{
cuda_ret = hipMalloc((void **) &d_res_cr, cr_res_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMalloc((void **) &d_res_cb, cb_res_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMalloc((void **) &d_cr_modes, mode_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMalloc((void **) &d_cb_modes, mode_size);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
cuda_ret = hipDeviceSynchronize();
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Grid dimension
dim3 dimGrid = dim3((int)ceil((imgs->width)/(float)luma_size), (int)ceil((imgs->height)/(float)luma_size), 1);
// Block dimension
dim3 dimBlock = dim3(luma_size, luma_size, 1);
//int neighbour_array_size = luma_size*2+1;
printf("\n KERNEL CONFIG: %d %d %d %d\n", dimGrid.x, dimGrid.y, dimBlock.x, dimBlock.y);
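//Launch mapping (as configured above): one thread block per luma_size x luma_size PB and one thread per
//pixel of that PB; hevcPredictionKernel (defined elsewhere) is assumed to recover each block's frame
//position from blockIdx/threadIdx. For luma_size <= 4 the chroma result pointers are still NULL, so the
//kernel is presumably expected to leave them untouched in that case.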
hipLaunchKernelGGL(( hevcPredictionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_y, d_cr, d_cb, d_res_y, d_res_cr, d_res_cb, d_y_modes, d_cr_modes, d_cb_modes, imgs->height, imgs->width);
cuda_ret = hipDeviceSynchronize();
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("num_blocks computed here (ceil division): %u\n", num_blocks);
printf("num_blocks reported by cur_result->create(): %d\n", cur_result->num_blocks);
cuda_ret = hipMemcpy(cur_result->y_satd_results, d_res_y, y_res_size, hipMemcpyDeviceToHost);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
/*
cuda_ret = hipMemcpy(cur_result->cr_satd_results, d_res_cr, cr_res_size, hipMemcpyDeviceToHost);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMemcpy(cur_result->cb_satd_results, d_res_cb, cb_res_size, hipMemcpyDeviceToHost);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
*/
cuda_ret = hipMemcpy(cur_result->y_modes, d_y_modes,mode_size, hipMemcpyDeviceToHost);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
/*
cuda_ret = hipMemcpy(cur_result->cr_modes, d_cr_modes, cr_res_size, hipMemcpyDeviceToHost);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = hipMemcpy(cur_result->cb_modes, d_cb_modes, cb_res_size, hipMemcpyDeviceToHost);
if ( cuda_ret != hipSuccess )
{
printf("\n%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
*/
cur_result++;
res_count++;
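//Note: d_res_y/d_y_modes (and the chroma buffers when allocated) are not freed before the next luma_size
//iteration; calling hipFree on them here would keep device memory use from growing across block sizes.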
}
}
return ret;
}
| 7d611d58c28583f06dd440ff9c8a6e89d278ee20.cu | /*****************************************************************************
* Copyright (C) 2013 x265 project
*
* Authors: Gopu Govindaswamy <[email protected]>
* Mandar Gurav <[email protected]>
* Mahesh Pittala <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at [email protected].
*****************************************************************************/
#include "ece408_competition.h"
#include "primitives.h"
#include "test/intrapredharness.h"
#include "cpu.h"
#include "TLibCommon/TComRom.h"
#include "TLibEncoder/TEncCfg.h"
#include "input/input.h"
#include "output/output.h"
#include "common.h"
#include "x265.h"
#include "getopt.h"
#include "PPA/ppa.h"
#include "encoder.h"
#include "TLibCommon/TComYuv.h"
#include "TLibCommon/TComPic.h"
#include "TLibCommon/TComPicYuv.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <fstream>
#include <algorithm>
#include "kernel.cu"
//Define this to verify the student intra prediction against the reference version
#define VERIFY
//#define VERBOSE
//Define this to dump all reference results to file (to compare between versions)
//#define DUMP_TO_FILE
//This is the filename where all reference results will be dumped ifdef DUMP_TO_FILE
#define DUMP_FILE "dump.bin"
using namespace x265;
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames);
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames);
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames);
Pel *refAbove1, *refAbove2, *refLeft1, *refLeft2;
Pel* predBuf;
int predBufStride;
int predBufHeight;
TComYuv pred_yuv;
TComYuv orig_yuv;
TComSPS sps;
TComPPS pps;
x265_param *param;
ALIGN_VAR_32(Pel, tmp[33 * 32 * 32]);
ALIGN_VAR_32(Pel, buf_trans[32 * 32]);
static const char short_options[] = "o:f:F:r:i:b:s:q:m:hwV";
static const struct option long_options[] =
{
#if HIGH_BIT_DEPTH
{ "depth", required_argument, NULL, 0 },
#endif
{ "help", no_argument, NULL, 'h' },
{ "version", no_argument, NULL, 'V' },
{ "cpuid", required_argument, NULL, 0 },
{ "threads", required_argument, NULL, 0 },
{ "preset", required_argument, NULL, 'p' },
{ "tune", required_argument, NULL, 't' },
{ "frame-threads", required_argument, NULL, 'F' },
{ "log", required_argument, NULL, 0 },
{ "csv", required_argument, NULL, 0 },
{ "y4m", no_argument, NULL, 0 },
{ "no-progress", no_argument, NULL, 0 },
{ "output", required_argument, NULL, 'o' },
{ "input", required_argument, NULL, 0 },
{ "input-depth", required_argument, NULL, 0 },
{ "input-res", required_argument, NULL, 0 },
{ "input-csp", required_argument, NULL, 0 },
{ "fps", required_argument, NULL, 0 },
{ "frame-skip", required_argument, NULL, 0 },
{ "frames", required_argument, NULL, 'f' },
{ "recon", required_argument, NULL, 'r' },
{ "recon-depth", required_argument, NULL, 0 },
{ "no-wpp", no_argument, NULL, 0 },
{ "wpp", no_argument, NULL, 0 },
{ "ctu", required_argument, NULL, 's' },
{ "tu-intra-depth", required_argument, NULL, 0 },
{ "tu-inter-depth", required_argument, NULL, 0 },
{ "me", required_argument, NULL, 0 },
{ "subme", required_argument, NULL, 'm' },
{ "merange", required_argument, NULL, 0 },
{ "max-merge", required_argument, NULL, 0 },
{ "rdpenalty", required_argument, NULL, 0 },
{ "no-rect", no_argument, NULL, 0 },
{ "rect", no_argument, NULL, 0 },
{ "no-amp", no_argument, NULL, 0 },
{ "amp", no_argument, NULL, 0 },
{ "no-early-skip", no_argument, NULL, 0 },
{ "early-skip", no_argument, NULL, 0 },
{ "no-fast-cbf", no_argument, NULL, 0 },
{ "fast-cbf", no_argument, NULL, 0 },
{ "no-tskip", no_argument, NULL, 0 },
{ "tskip", no_argument, NULL, 0 },
{ "no-tskip-fast", no_argument, NULL, 0 },
{ "tskip-fast", no_argument, NULL, 0 },
{ "no-constrained-intra", no_argument, NULL, 0 },
{ "constrained-intra", no_argument, NULL, 0 },
{ "refresh", required_argument, NULL, 0 },
{ "keyint", required_argument, NULL, 'i' },
{ "rc-lookahead", required_argument, NULL, 0 },
{ "bframes", required_argument, NULL, 'b' },
{ "bframe-bias", required_argument, NULL, 0 },
{ "b-adapt", required_argument, NULL, 0 },
{ "no-b-pyramid", no_argument, NULL, 0 },
{ "b-pyramid", no_argument, NULL, 0 },
{ "ref", required_argument, NULL, 0 },
{ "no-weightp", no_argument, NULL, 0 },
{ "weightp", no_argument, NULL, 'w' },
{ "crf", required_argument, NULL, 0 },
{ "vbv-maxrate", required_argument, NULL, 0 },
{ "vbv-bufsize", required_argument, NULL, 0 },
{ "vbv-init", required_argument, NULL, 0 },
{ "bitrate", required_argument, NULL, 0 },
{ "qp", required_argument, NULL, 'q' },
{ "aq-mode", required_argument, NULL, 0 },
{ "aq-strength", required_argument, NULL, 0 },
{ "cbqpoffs", required_argument, NULL, 0 },
{ "crqpoffs", required_argument, NULL, 0 },
{ "rd", required_argument, NULL, 0 },
{ "no-signhide", no_argument, NULL, 0 },
{ "signhide", no_argument, NULL, 0 },
{ "no-lft", no_argument, NULL, 0 },
{ "lft", no_argument, NULL, 0 },
{ "no-sao", no_argument, NULL, 0 },
{ "sao", no_argument, NULL, 0 },
{ "sao-lcu-bounds", required_argument, NULL, 0 },
{ "sao-lcu-opt", required_argument, NULL, 0 },
{ "no-ssim", no_argument, NULL, 0 },
{ "ssim", no_argument, NULL, 0 },
{ "no-psnr", no_argument, NULL, 0 },
{ "psnr", no_argument, NULL, 0 },
{ "hash", required_argument, NULL, 0 },
{ "no-strong-intra-smoothing", no_argument, NULL, 0 },
{ "strong-intra-smoothing", no_argument, NULL, 0 },
{ 0, 0, 0, 0 }
};
struct CLIOptions
{
Input* input;
Output* recon;
std::fstream bitstreamFile;
bool bProgress;
bool bForceY4m;
uint32_t totalbytes;
uint32_t frameSkip; // number of frames to skip from the beginning
uint32_t framesToBeEncoded; // number of frames to encode
int64_t startTime;
int64_t prevUpdateTime;
/* in microseconds */
static const int UPDATE_INTERVAL = 250000;
CLIOptions()
{
input = NULL;
recon = NULL;
framesToBeEncoded = frameSkip = totalbytes = 0;
bProgress = true;
bForceY4m = false;
startTime = x265_mdate();
prevUpdateTime = 0;
}
void destroy();
void writeNALs(const x265_nal* nal, uint32_t nalcount);
void printVersion(x265_param *par);
void showHelp(x265_param *par);
bool parse(int argc, char **argv, x265_param* par);
};
void CLIOptions::destroy()
{
if (input)
input->release();
input = NULL;
if (recon)
recon->release();
recon = NULL;
}
void CLIOptions::writeNALs(const x265_nal* nal, uint32_t nalcount)
{
PPAScopeEvent(bitstream_write);
for (uint32_t i = 0; i < nalcount; i++)
{
bitstreamFile.write((const char*)nal->payload, nal->sizeBytes);
totalbytes += nal->sizeBytes;
nal++;
}
}
void CLIOptions::printVersion(x265_param *par)
{
fprintf(stderr, "x265 [info]: HEVC encoder version %s\n", x265_version_str);
fprintf(stderr, "x265 [info]: build info %s\n", x265_build_info_str);
x265_setup_primitives(par, -1);
}
void CLIOptions::showHelp(x265_param *par)
{
x265_param_default(par);
printVersion(par);
#define H0 printf
#define OPT(value) (value ? "enabled" : "disabled")
H0("\nSyntax: x265 [options] infile [-o] outfile\n");
H0(" infile can be YUV or Y4M\n");
H0(" outfile is raw HEVC bitstream\n");
H0("\nExecutable Options:\n");
H0("-h/--h Show this help text and exit\n");
H0("-V/--version Show version info and exit\n");
H0(" --cpuid Limit SIMD capability bitmap 0:auto 1:None. Default:0\n");
H0(" --threads Number of threads for thread pool (0: detect CPU core count, default)\n");
H0("-p/--preset ultrafast, veryfast, faster, fast, medium, slow, slower, veryslow, or placebo\n");
H0("-t/--tune Tune the settings for a particular type of source or situation\n");
H0("-F/--frame-threads Number of concurrently encoded frames. Default %d\n", par->frameNumThreads);
H0(" --log Logging level 0:ERROR 1:WARNING 2:INFO 3:DEBUG -1:NONE. Default %d\n", par->logLevel);
H0(" --csv Comma separated log file, log level >= 3 frame log, else one line per run\n");
H0(" --y4m Parse input stream as YUV4MPEG2 regardless of file extension\n");
H0(" --no-progress Disable CLI progress reports\n");
H0("-o/--output Bitstream output file name\n");
H0("\nInput Options:\n");
H0(" --input Raw YUV or Y4M input file name\n");
H0(" --input-depth Bit-depth of input file (YUV only) Default %d\n", par->inputBitDepth);
H0(" --input-res Source picture size [w x h], auto-detected if Y4M\n");
H0(" --input-csp Source color space parameter, auto-detected if Y4M\n");
H0(" --fps Source frame rate, auto-detected if Y4M\n");
H0(" --frame-skip Number of frames to skip at start of input file\n");
H0("-f/--frames Number of frames to be encoded. Default all\n");
H0("\nQuad-Tree analysis:\n");
H0(" --[no-]wpp Enable Wavefront Parallel Processing. Default %s\n", OPT(par->bEnableWavefront));
H0("-s/--ctu Maximum CU size. Default %dx%d\n", par->maxCUSize, par->maxCUSize);
H0(" --tu-intra-depth Max TU recursive depth for intra CUs. Default %d\n", par->tuQTMaxIntraDepth);
H0(" --tu-inter-depth Max TU recursive depth for inter CUs. Default %d\n", par->tuQTMaxInterDepth);
H0("\nTemporal / motion search options:\n");
H0(" --me Motion search method 0:dia 1:hex 2:umh 3:star 4:full. Default %d\n", par->searchMethod);
H0("-m/--subme Amount of subpel refinement to perform (0:least .. 7:most). Default %d \n", par->subpelRefine);
H0(" --merange Motion search range. Default %d\n", par->searchRange);
H0(" --[no-]rect Enable rectangular motion partitions Nx2N and 2NxN. Default %s\n", OPT(par->bEnableRectInter));
H0(" --[no-]amp Enable asymmetric motion partitions, requires --rect. Default %s\n", OPT(par->bEnableAMP));
H0(" --max-merge Maximum number of merge candidates. Default %d\n", par->maxNumMergeCand);
H0(" --[no-]early-skip Enable early SKIP detection. Default %s\n", OPT(par->bEnableEarlySkip));
H0(" --[no-]fast-cbf Enable Cbf fast mode \n \t\t\t\t Default : %s\n", OPT(par->bEnableCbfFastMode));
H0("\nSpatial / intra options:\n");
H0(" --rdpenalty penalty for 32x32 intra TU in non-I slices. 0:disabled 1:RD-penalty 2:maximum. Default %d\n", par->rdPenalty);
H0(" --[no-]tskip Enable intra transform skipping. Default %s\n", OPT(par->bEnableTransformSkip));
H0(" --[no-]tskip-fast Enable fast intra transform skipping. Default %s\n", OPT(par->bEnableTSkipFast));
H0(" --[no-]strong-intra-smoothing Enable strong intra smoothing for 32x32 blocks. Default %s\n", OPT(par->bEnableStrongIntraSmoothing));
H0(" --[no-]constrained-intra Constrained intra prediction (use only intra coded reference pixels) Default %s\n", OPT(par->bEnableConstrainedIntra));
H0("\nSlice decision options:\n");
H0(" --refresh Intra refresh type - 0:none, 1:CDR, 2:IDR (default: CDR) Default %d\n", par->decodingRefreshType);
H0("-i/--keyint Max intra period in frames. Default %d\n", par->keyframeMax);
H0(" --rc-lookahead Number of frames for frame-type lookahead (determines encoder latency) Default %d\n", par->lookaheadDepth);
H0(" --bframes Maximum number of consecutive b-frames (now it only enables B GOP structure) Default %d\n", par->bframes);
H0(" --bframe-bias Bias towards B frame decisions. Default %d\n", par->bFrameBias);
H0(" --b-adapt 0 - none, 1 - fast, 2 - full (trellis) adaptive B frame scheduling. Default %d\n", par->bFrameAdaptive);
H0(" --[no-]b-pyramid Use B-frames as references. Default %s\n", OPT(par->bBPyramid));
H0(" --ref max number of L0 references to be allowed (1 .. 16) Default %d\n", par->maxNumReferences);
H0("-w/--[no-]weightp Enable weighted prediction in P slices. Default %s\n", OPT(par->bEnableWeightedPred));
H0("\nQP, rate control and rate distortion options:\n");
H0(" --bitrate Target bitrate (kbps), implies ABR. Default %d\n", par->rc.bitrate);
H0(" --crf Quality-based VBR (0-51). Default %f\n", par->rc.rfConstant);
H0(" --vbv-maxrate Max local bitrate (kbit/s). Default %d\n", par->rc.vbvMaxBitrate);
H0(" --vbv-bufsize Set size of the VBV buffer (kbit). Default %d\n", par->rc.vbvBufferSize);
H0(" --vbv-init Initial VBV buffer occupancy. Default %f\n", par->rc.vbvBufferInit);
H0("-q/--qp Base QP for CQP mode. Default %d\n", par->rc.qp);
H0(" --aq-mode Mode for Adaptive Quantization - 0:none 1:aqVariance Default %d\n", par->rc.aqMode);
H0(" --aq-strength Reduces blocking and blurring in flat and textured areas.(0 to 3.0)<double> . Default %f\n", par->rc.aqStrength);
H0(" --cbqpoffs Chroma Cb QP Offset. Default %d\n", par->cbQpOffset);
H0(" --crqpoffs Chroma Cr QP Offset. Default %d\n", par->crQpOffset);
H0(" --rd Level of RD in mode decision 0:least....2:full RDO. Default %d\n", par->rdLevel);
H0(" --[no-]signhide Hide sign bit of one coeff per TU (rdo). Default %s\n", OPT(par->bEnableSignHiding));
H0("\nLoop filter:\n");
H0(" --[no-]lft Enable Loop Filter. Default %s\n", OPT(par->bEnableLoopFilter));
H0("\nSample Adaptive Offset loop filter:\n");
H0(" --[no-]sao Enable Sample Adaptive Offset. Default %s\n", OPT(par->bEnableSAO));
H0(" --sao-lcu-bounds 0: right/bottom boundary areas skipped 1: non-deblocked pixels are used. Default %d\n", par->saoLcuBoundary);
H0(" --sao-lcu-opt 0: SAO picture-based optimization, 1: SAO LCU-based optimization. Default %d\n", par->saoLcuBasedOptimization);
H0("\nQuality reporting metrics:\n");
H0(" --[no-]ssim Enable reporting SSIM metric scores. Default %s\n", OPT(par->bEnableSsim));
H0(" --[no-]psnr Enable reporting PSNR metric scores. Default %s\n", OPT(par->bEnablePsnr));
H0("\nReconstructed video options (debugging):\n");
H0("-r/--recon Reconstructed raw image YUV or Y4M output file name\n");
H0(" --recon-depth Bit-depth of reconstructed raw image file. Default 8\n");
H0("\nSEI options:\n");
H0(" --hash Decoded Picture Hash SEI 0: disabled, 1: MD5, 2: CRC, 3: Checksum. Default %d\n", par->decodedPictureHashSEI);
#undef OPT
#undef H0
exit(0);
}
bool CLIOptions::parse(int argc, char **argv, x265_param* par)
{
int berror = 0;
int help = 0;
int cpuid = 0;
int reconFileBitDepth = 0;
const char *inputfn = NULL;
const char *reconfn = NULL;
const char *bitstreamfn = NULL;
const char *inputRes = NULL;
const char *preset = "medium";
const char *tune = "psnr";
/* Presets are applied before all other options. */
for (optind = 0;; )
{
int c = getopt_long(argc, argv, short_options, long_options, NULL);
if (c == -1)
break;
if (c == 'p')
preset = optarg;
if (c == 't')
tune = optarg;
else if (c == '?')
return true;
}
if (x265_param_default_preset(param, preset, tune) < 0)
{
x265_log(NULL, X265_LOG_WARNING, "preset or tune unrecognized\n");
return true;
}
//MRJ Set max CU size to 32x32 so that frames are padded in Encoder::configure() to a multiple of 4x4, not a multiple of 8x8.
par->maxCUSize = 32;
for (optind = 0;; )
{
int long_options_index = -1;
int c = getopt_long(argc, argv, short_options, long_options, &long_options_index);
if (c == -1)
{
break;
}
switch (c)
{
case 'h':
showHelp(par);
break;
case 'V':
printVersion(par);
exit(0);
default:
if (long_options_index < 0 && c > 0)
{
for (size_t i = 0; i < sizeof(long_options) / sizeof(long_options[0]); i++)
{
if (long_options[i].val == c)
{
long_options_index = (int)i;
break;
}
}
if (long_options_index < 0)
{
/* getopt_long might have already printed an error message */
if (c != 63)
x265_log(NULL, X265_LOG_WARNING, "internal error: short option '%c' has no long option\n", c);
return true;
}
}
if (long_options_index < 0)
{
x265_log(NULL, X265_LOG_WARNING, "short option '%c' unrecognized\n", c);
return true;
}
#define OPT(longname) \
else if (!strcmp(long_options[long_options_index].name, longname))
if (0) ;
OPT("cpuid") cpuid = atoi(optarg);
OPT("frames") this->framesToBeEncoded = (uint32_t)atoi(optarg);
OPT("preset") preset = optarg;
OPT("tune") tune = optarg;
OPT("no-progress") this->bProgress = false;
OPT("frame-skip") this->frameSkip = (uint32_t)atoi(optarg);
OPT("output") bitstreamfn = optarg;
OPT("input") inputfn = optarg;
OPT("recon") reconfn = optarg;
OPT("input-depth") par->inputBitDepth = (uint32_t)atoi(optarg);
OPT("recon-depth") reconFileBitDepth = (uint32_t)atoi(optarg);
OPT("input-res") inputRes = optarg;
OPT("y4m") bForceY4m = true;
else
berror |= x265_param_parse(par, long_options[long_options_index].name, optarg);
if (berror)
{
const char *name = long_options_index > 0 ? long_options[long_options_index].name : argv[optind - 2];
x265_log(NULL, X265_LOG_ERROR, "invalid argument: %s = %s\n", name, optarg);
return true;
}
#undef OPT
}
}
if (optind < argc && !inputfn)
inputfn = argv[optind++];
if (optind < argc && !bitstreamfn)
bitstreamfn = argv[optind++];
if (optind < argc)
{
x265_log(par, X265_LOG_WARNING, "extra unused command arguments given <%s>\n", argv[optind]);
return true;
}
if (argc <= 1 || help)
showHelp(par);
if (inputfn == NULL || bitstreamfn == NULL)
{
x265_log(par, X265_LOG_ERROR, "input or output file not specified, try -V for help\n");
return true;
}
this->input = Input::open(inputfn, par->inputBitDepth, bForceY4m);
if (!this->input || this->input->isFail())
{
x265_log(par, X265_LOG_ERROR, "unable to open input file <%s>\n", inputfn);
return true;
}
if (this->input->getWidth())
{
/* parse the width, height, frame rate from the y4m file */
par->internalCsp = this->input->getColorSpace();
par->sourceWidth = this->input->getWidth();
par->sourceHeight = this->input->getHeight();
par->frameRate = (int)this->input->getRate();
}
else if (inputRes)
{
this->input->setColorSpace(par->internalCsp);
sscanf(inputRes, "%dx%d", &par->sourceWidth, &par->sourceHeight);
this->input->setDimensions(par->sourceWidth, par->sourceHeight);
this->input->setBitDepth(par->inputBitDepth);
}
else if (par->sourceHeight <= 0 || par->sourceWidth <= 0 || par->frameRate <= 0)
{
x265_log(par, X265_LOG_ERROR, "YUV input requires source width, height, and rate to be specified\n");
return true;
}
else
{
this->input->setDimensions(par->sourceWidth, par->sourceHeight);
this->input->setBitDepth(par->inputBitDepth);
}
int guess = this->input->guessFrameCount();
if (this->frameSkip)
{
this->input->skipFrames(this->frameSkip);
}
uint32_t fileFrameCount = guess < 0 ? 0 : (uint32_t)guess;
if (this->framesToBeEncoded && fileFrameCount)
this->framesToBeEncoded = X265_MIN(this->framesToBeEncoded, fileFrameCount - this->frameSkip);
else if (fileFrameCount)
this->framesToBeEncoded = fileFrameCount - this->frameSkip;
if (par->logLevel >= X265_LOG_INFO)
{
if (this->framesToBeEncoded == 0)
fprintf(stderr, "%s [info]: %dx%d %dHz %s, unknown frame count\n", input->getName(),
par->sourceWidth, par->sourceHeight, par->frameRate,
(par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420");
else
fprintf(stderr, "%s [info]: %dx%d %dHz %s, frames %u - %d of %d\n", input->getName(),
par->sourceWidth, par->sourceHeight, par->frameRate,
(par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420",
this->frameSkip, this->frameSkip + this->framesToBeEncoded - 1, fileFrameCount);
}
this->input->startReader();
if (reconfn)
{
if (reconFileBitDepth == 0)
reconFileBitDepth = par->inputBitDepth;
this->recon = Output::open(reconfn, par->sourceWidth, par->sourceHeight, reconFileBitDepth, par->frameRate, par->internalCsp);
if (this->recon->isFail())
{
x265_log(par, X265_LOG_WARNING, "unable to write reconstruction file\n");
this->recon->release();
this->recon = 0;
}
}
#if HIGH_BIT_DEPTH
if (par->inputBitDepth != 12 && par->inputBitDepth != 10 && par->inputBitDepth != 8)
{
x265_log(par, X265_LOG_ERROR, "Only bit depths of 8, 10, or 12 are supported\n");
return true;
}
#else
if (par->inputBitDepth != 8)
{
x265_log(par, X265_LOG_ERROR, "not compiled for bit depths greater than 8\n");
return true;
}
#endif // if HIGH_BIT_DEPTH
this->bitstreamFile.open(bitstreamfn, std::fstream::binary | std::fstream::out);
if (!this->bitstreamFile)
{
x265_log(NULL, X265_LOG_ERROR, "failed to open bitstream file <%s> for writing\n", bitstreamfn);
return true;
}
x265_setup_primitives(par, cpuid);
printVersion(par);
return false;
}
int main(int argc, char *argv[])
{
CLIOptions cliopt;
param = x265_param_alloc();
if (cliopt.parse(argc, argv, param))
{
cliopt.destroy();
exit(1);
}
param->bEnableStrongIntraSmoothing = false; //No strong intra smoothing for competition
TEncCfg *encoder = new TEncCfg();
if (!encoder)
{
x265_log(param, X265_LOG_ERROR, "failed to open encoder\n");
cliopt.destroy();
x265_cleanup();
exit(1);
}
// save a copy of final parameters in TEncCfg
memcpy(&encoder->param, param, sizeof(*param));
encoder->m_pad[0] = encoder->m_pad[1] = 0;
//MRJ the above (original) line always computes 8, let's set it to 4 instead to get the correct padding.
uint32_t minCUDepth = 4;
if ((param->sourceWidth % minCUDepth) != 0)
{
uint32_t padsize = 0;
uint32_t rem = param->sourceWidth % minCUDepth;
padsize = minCUDepth - rem;
param->sourceWidth += padsize;
encoder->m_pad[0] = padsize; //pad width
/* set the confirmation window offsets */
encoder->m_conformanceWindow.m_enabledFlag = true;
encoder->m_conformanceWindow.m_winRightOffset = encoder->m_pad[0];
}
//======== set pad size if height is not multiple of the minimum CU size =========
if ((param->sourceHeight % minCUDepth) != 0)
{
uint32_t padsize = 0;
uint32_t rem = param->sourceHeight % minCUDepth;
padsize = minCUDepth - rem;
param->sourceHeight += padsize;
encoder->m_pad[1] = padsize; //pad height
/* set the confirmation window offsets */
encoder->m_conformanceWindow.m_enabledFlag = true;
encoder->m_conformanceWindow.m_winBottomOffset = encoder->m_pad[1];
}
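//Worked example of the padding above: a 1918x1080 source with minCUDepth = 4 gets padsize = 4 - (1918 % 4) = 2,
//so m_pad[0] = 2 and the padded width is 1920; the height is already a multiple of 4, so m_pad[1] stays 0.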
//Encoder *encoder_c = static_cast<Encoder*>(encoder);
//Initialize arrays for storing neighboring pixel values
refAbove1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refAbove2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refLeft1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refLeft2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
//Save globals so we can restore them at the end
//We need to restore the original values before destroy()ing data structures because many of the destroy() functions
//use these globals to determine the size of their arrays
int g_maxCUDepth_bak = g_maxCUDepth;
int g_addCUDepth_bak = g_addCUDepth;
int g_maxCUWidth_bak = g_maxCUWidth;
int g_maxCUHeight_bak = g_maxCUHeight;
g_maxCUDepth = 0; //Disallow recursion to decompose frames into a regular grid of equal size CUs.
g_addCUDepth = 0;
//NOTE: has to be after x265_encoder_open() call, since that calls x265_set_globals(), which resets g_maxCUDepth.
x265_picture pic_orig;
x265_picture *pic_in = &pic_orig;
x265_picture_init(param, pic_in);
uint32_t inFrameCount = 0;
//Several pieces of the reference code assume 4:2:0 subsampling, so assert that here
if(param->internalCsp != X265_CSP_I420) {
fprintf(stderr, "Error: Input must use i420 colorspace (4:2:0 subsampling)\n");
exit(1);
}
#ifdef DUMP_TO_FILE
FILE *f = fopen(DUMP_FILE, "wb");
if(!f) {
fprintf(stderr, "Error opening dump file (" DUMP_FILE ")\n");
exit(1);
}
#endif
while (1)
{
pic_orig.poc = inFrameCount;
if (cliopt.framesToBeEncoded && inFrameCount >= cliopt.framesToBeEncoded)
break;
else if (cliopt.input->readPicture(pic_orig))
inFrameCount++;
else
break;
ece408_intra_pred_result *ref = ece408_competition_ref(encoder, pic_in, 1);
#ifdef DUMP_TO_FILE
ref[0].write_to_file(f);
#endif
ece408_frame frame(param->sourceWidth, param->sourceHeight, pic_in);
//Uncomment this one to run the student version
ece408_intra_pred_result *student = ece408_competition(&frame, 1);
//Uncomment this one instead to run the reference version twice (to test the compare function)
//ece408_intra_pred_result *student = ece408_competition_ref(encoder, pic_in, 1);
#ifdef VERIFY
if(!ece408_compare(ref, student, 1)) {
printf("Error in frame %d\n", inFrameCount);
exit(1);
}
#endif
for(int i = 0; i < 4*1; i++) {
ref[i].destroy();
student[i].destroy();
}
delete[] ref;
delete[] student;
}
#ifdef DUMP_TO_FILE
fclose(f);
#endif
#ifdef VERIFY
printf("Success!\n");
#endif
//Restore globals
g_maxCUDepth = g_maxCUDepth_bak;
g_addCUDepth = g_addCUDepth_bak;
g_maxCUWidth = g_maxCUWidth_bak;
g_maxCUHeight = g_maxCUHeight_bak;
delete encoder;
X265_FREE(refAbove1);
X265_FREE(refAbove2);
X265_FREE(refLeft1);
X265_FREE(refLeft2);
orig_yuv.destroy();
pred_yuv.destroy();
x265_cleanup(); /* Free library singletons */
cliopt.destroy();
x265_param_free(param);
return 0;
}
//channel = 0 for luma, 1 for cb, 2 for cr
void ece408_intra_pred_channel(int luma_size, int channel, int32_t *sad_ptr) {
//#define VERBOSE
#ifdef VERBOSE
printf("refAbove1: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refAbove1[i]);
printf("\n");
printf("refAbove2: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refAbove2[i]);
printf("\n");
printf("refLeft1: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refLeft1[i]);
printf("\n");
printf("refLeft2: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refLeft2[i]);
printf("\n");
#endif
int chroma_size = luma_size >> 1;
bool luma = (channel == 0);
bool cb = (channel == 1);
bool cr = (channel == 2);
int size = luma ? luma_size : chroma_size;
Pel* orig_pel = luma ? orig_yuv.getLumaAddr(0, size) : (cb ? orig_yuv.getCbAddr(0, size) : orig_yuv.getCrAddr(0, size));
Pel* pred_pel = luma ? pred_yuv.getLumaAddr(0, size) : (cb ? pred_yuv.getCbAddr(0, size) : pred_yuv.getCrAddr(0, size));
uint32_t stride = luma ? pred_yuv.getStride() : pred_yuv.getCStride();
Pel *pAboveUnfilt = (cr ? refAbove2 : refAbove1) + size - 1;
Pel *pAboveFilt = luma ? (refAbove2 + size - 1) : pAboveUnfilt;
Pel *pLeftUnfilt = (cr ? refLeft2 : refLeft1) + size - 1;
Pel *pLeftFilt = luma ? (refLeft2 + size - 1) : pLeftUnfilt;
int nLog2SizeMinus2 = g_convertToBit[size];
pixelcmp_t sa8d = primitives.sa8d[nLog2SizeMinus2];
#ifdef VERBOSE
printf("Channel %d Orig:\n", channel);
for(int row = 0; row < size; row++) {
for(int col = 0; col < size; col++) {
printf("%02X ", orig_pel[row*size + col]);
}
printf("\n");
}
#endif
int sad;
Pel *above = (luma && size >= 8) ? pAboveFilt : pAboveUnfilt;
Pel *left = (luma && size >= 8) ? pLeftFilt : pLeftUnfilt;
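//Mode numbering used below: 0 = planar, 1 = DC, 2..34 = angular (2..17 lean horizontal, 18..34 lean
//vertical; 10 is pure horizontal and 26 is pure vertical).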
//TODO check to make sure we're filtering in all the right conditions
primitives.intra_pred[nLog2SizeMinus2][0](pred_pel, stride, left, above, /*dummy dirMode argument*/ 0, /*dummy filter argument*/ 0);
sad = sa8d(orig_pel, stride, pred_pel, stride);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("Planar SATD = %d\n", sad);
#endif
//TODO check to make sure we're filtering in all the right conditions
//DC (mode 1)
primitives.intra_pred[nLog2SizeMinus2][1](pred_pel, stride, pLeftUnfilt, pAboveUnfilt, /*dummy dirMode argument*/ 1, (luma && size <= 16));
sad = sa8d(orig_pel, stride, pred_pel, stride);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("Size = %d, stride = %d, DC:\n", size, stride);
for(int row = 0; row < size; row++) {
for(int col = 0; col < size; col++) {
printf("%02X ", pred_pel[row*size+col]);
}
printf("\n");
}
printf("SATD = %d\n", sad);
#endif
primitives.transpose[nLog2SizeMinus2](buf_trans, orig_pel, stride);
//TODO check to make sure we're filtering in all the right conditions
primitives.intra_pred_allangs[nLog2SizeMinus2](tmp, pAboveUnfilt, pLeftUnfilt, pAboveFilt, pLeftFilt, (luma && (size <= 16)));
#ifdef VERBOSE
printf("Angular SATD (channel %d) = ", channel);
#endif
for (int mode = 2; mode < 35; mode++)
{
bool modeHor = (mode < 18);
Pel *cmp = (modeHor ? buf_trans : orig_pel);
intptr_t srcStride = (modeHor ? size : stride);
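//For modes 2..17 the all-angle primitive stores its prediction transposed (so one code path serves both
//horizontal and vertical directions), which is presumably why horizontal modes are compared against
//buf_trans with the block's own width as stride rather than against orig_pel.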
#ifdef VERBOSE
printf("Pred mode %d\n", mode);
for(int r = 0; r < size; r++) {
for(int c = 0; c < size; c++)
printf("%02X ", tmp[(mode-2) * (size * size) + r * size + c]);
printf("\n");
}
#endif
sad = sa8d(cmp, srcStride, &tmp[(mode - 2) * (size * size)], size);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("%d, ", sad);
#endif
}
#ifdef VERBOSE
printf("\n");
#endif
}
//#undef VERBOSE
inline bool isAvailable(int frameWidth, int frameHeight, int r, int c) {
return (r >= 0 && c >= 0 && r < frameHeight && c < frameWidth);
}
//Channel is 0 for luma, 1 for Cb, 2 for Cr
void getReferencePixels(x265_picture *pic, unsigned int width, unsigned int height, unsigned int luma_size, unsigned int cu_index, Pel* refAbove, Pel* refLeft, Pel* refAboveFlt, Pel* refLeftFlt, int channel) {
uint32_t cuWidth = (channel == 0) ? luma_size : (luma_size / 2);
uint32_t cuWidth2 = cuWidth << 1;
uint32_t frameWidth = (channel == 0) ? width : (width / 2);
uint32_t frameHeight = (channel == 0) ? height : (height / 2);
uint32_t frameStride = pic->stride[channel];
//Base address of the array containing the required color component of the reconstructed image (equivalent to the original image for the ECE408 competition)
Pel *baseAddress = (Pel *)pic->planes[channel];
int32_t topLeftR = (cu_index / (frameWidth / cuWidth)) * cuWidth;
int32_t topLeftC = (cu_index % (frameWidth / cuWidth)) * cuWidth;
//Find value for bottom-left neighbor
//Search left from bottom to top
bool bottomLeftFound = false;
for(int32_t neighborR = (topLeftR + cuWidth2 - 1), neighborC = (topLeftC - 1); neighborR >= (topLeftR - 1); neighborR--)
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
bottomLeftFound = true;
refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
//printf("Bottom left found on left (%d, %d) %d\n", neighborR, neighborC, refLeft[cuWidth2+1]);
break;
}
//If not found, search top from left to right
if(!bottomLeftFound) {
for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
bottomLeftFound = true;
refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
//printf("Bottom left found on top (%d, %d) %d \n", neighborR, neighborC, refLeft[cuWidth2+1]);
break;
}
}
}
//If still not found, no reference samples are available, so assign 50% value to all neighbors
if(!bottomLeftFound) {
refLeft[cuWidth2] = 1 << (BIT_DEPTH - 1);
//printf("Bottom left not found, using DC value %d\n", refLeft[cuWidth2]);
}
//Traverse bottom-left to top-left to top-right. If a pixel is not available, use the one before it (one below or to the left)
for(int32_t neighborR = (topLeftR + cuWidth2 - 2), neighborC = (topLeftC - 1), idx = cuWidth2 - 1; neighborR >= (topLeftR - 1); neighborR--, idx--) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
refLeft[idx] = baseAddress[neighborR*frameStride + neighborC];
//printf("Left[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
}
else {
refLeft[idx] = refLeft[idx+1];
//printf("Left[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
}
}
//Include the top-left corner in both refLeft and refAbove
refAbove[0] = refLeft[0];
for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC, idx = 1; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++, idx++) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
refAbove[idx] = baseAddress[neighborR*frameStride + neighborC];
//printf("Above[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
}
else {
refAbove[idx] = refAbove[idx-1];
//printf("Above[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
}
}
//Make filtered version (for luma only)
if(channel == 0) {
//Special cases for the corner, bottom, and right pixels, [1 2 1] FIR filter for the rest
//pF[ -1 ][ -1 ] = ( p[ -1 ][ 0 ] + 2 * p[ -1 ][ -1 ] + p[ 0 ][ -1 ] + 2 ) >> 2
refLeftFlt[0] = refAboveFlt[0] = (refLeft[1] + 2 * refLeft[0] + refAbove[1] + 2) >> 2;
for(uint32_t idx = 1; idx < cuWidth2; idx++) {
refLeftFlt[idx] = (refLeft[idx-1] + 2 * refLeft[idx] + refLeft[idx+1] + 2) >> 2;
refAboveFlt[idx] = (refAbove[idx-1] + 2 * refAbove[idx] + refAbove[idx+1] + 2) >> 2;
}
refLeftFlt[cuWidth2] = refLeft[cuWidth2];
refAboveFlt[cuWidth2] = refAbove[cuWidth2];
}
}
//luma_size is the (square) block size of luma blocks, chroma blocks are assumed (luma_size/2)x(luma_size/2)
void ece408_intra_pred(x265_picture *pic, int width, int height, int luma_size, unsigned int cu_index, int32_t *y_ptr, int32_t *cb_ptr, int32_t *cr_ptr) {
unsigned int luma_r = (cu_index / (width / luma_size)) * luma_size;
unsigned int luma_c = (cu_index % (width / luma_size)) * luma_size;
//Copy luma bytes into orig_yuv
Pel *walker = orig_yuv.getLumaAddr();
for(int i = 0; i < luma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[0]) + (((luma_r + i)*pic->stride[0]) + luma_c), luma_size*sizeof(*walker));
walker += luma_size;
}
if(luma_size > 4) {
//Copy chroma bytes into orig_yuv
unsigned int chroma_r = luma_r / 2;
unsigned int chroma_c = luma_c / 2;
unsigned int chroma_size = luma_size / 2;
walker = orig_yuv.getCbAddr();
for(unsigned int i = 0; i < chroma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[1]) + (((chroma_r + i)*pic->stride[1]) + chroma_c), chroma_size*sizeof(*walker));
walker += chroma_size;
}
walker = orig_yuv.getCrAddr();
for(unsigned int i = 0; i < chroma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[2]) + (((chroma_r + i)*pic->stride[2]) + chroma_c), chroma_size*sizeof(*walker));
walker += chroma_size;
}
}
//Get the unfiltered and filtered reference pixels. Position them (cuWidth-1) elements into their respective arrays so that the
//angular prediction function can use the unused space at the beginning of the array to extend the reference pixels as described
//in equations 8-48 and 8-56 in Section 8.4.4.2.6 of the H.265 standard.
getReferencePixels(pic, width, height, luma_size, cu_index, refAbove1+luma_size-1, refLeft1+luma_size-1, refAbove2+luma_size-1, refLeft2+luma_size-1, /*channel*/ 0);
#ifdef VERBOSE
printf("Above ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refAbove1[i+luma_size-1]);
printf("\nLeft ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refLeft1[i+luma_size-1]);
printf("\nAboveFilt ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refAbove2[i+luma_size-1]);
printf("\nLeftFilt ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refLeft2[i+luma_size-1]);
printf("\n");
#endif
ece408_intra_pred_channel(luma_size, 0, y_ptr);
if(luma_size > 4) { //No 2x2 chroma blocks, and 4x4 chroma blocks are covered with 8x8 luma
getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove1+luma_size/2)-1, refLeft1+(luma_size/2)-1, NULL, NULL, /*channel*/ 1);
ece408_intra_pred_channel(luma_size, 1, cb_ptr);
getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove2+luma_size/2)-1, refLeft2+(luma_size/2)-1, NULL, NULL, /*channel*/ 2);
ece408_intra_pred_channel(luma_size, 2, cr_ptr);
}
}
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames) {
ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
ece408_intra_pred_result *cur_result = ret;
for(int i = 0; i < num_frames; i++) {
for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
cur_result->create(param->sourceWidth, param->sourceHeight, luma_size);
int32_t *y_satd_results = cur_result->y_satd_results;
uint8_t *y_modes = cur_result->y_modes;
int32_t *cb_satd_results = cur_result->cb_satd_results;
uint8_t *cb_modes = cur_result->cb_modes;
int32_t *cr_satd_results = cur_result->cr_satd_results;
uint8_t *cr_modes = cur_result->cr_modes;
orig_yuv.destroy();
orig_yuv.create(luma_size, luma_size, X265_CSP_I420);
pred_yuv.destroy();
pred_yuv.create(luma_size, luma_size, X265_CSP_I420);
for(unsigned int cuIndex = 0; cuIndex < (unsigned int)((encoder->param.sourceWidth/luma_size)*(encoder->param.sourceHeight/luma_size)); cuIndex++) {
ece408_intra_pred(&(pics_in[i]),
encoder->param.sourceWidth,
encoder->param.sourceHeight,
luma_size,
cuIndex,
&(y_satd_results[35*cuIndex]),
&(cb_satd_results[35*cuIndex]),
&(cr_satd_results[35*cuIndex]));
//printf("SATD results: ");
//for(int l = 0; l < 35; l++) {
// printf("(%d, %d, %d, %d) ", l, y_satd_results[35*cuIndex+l], cb_satd_results[35*cuIndex+l], cr_satd_results[35*cuIndex+l]);
//}
//printf("\n");
for(int mode = 0; mode < 35; mode++) {
y_satd_results[35*cuIndex + mode] = (y_satd_results[35*cuIndex + mode] << 8) | mode;
if(luma_size > 4) {
cb_satd_results[35*cuIndex + mode] = (cb_satd_results[35*cuIndex + mode] << 8) | mode;
cr_satd_results[35*cuIndex + mode] = (cr_satd_results[35*cuIndex + mode] << 8) | mode;
}
}
std::sort(&(y_satd_results[35*cuIndex]), &(y_satd_results[35*cuIndex+35]));
if(luma_size > 4) {
std::sort(&(cb_satd_results[35*cuIndex]), &(cb_satd_results[35*cuIndex+35]));
std::sort(&(cr_satd_results[35*cuIndex]), &(cr_satd_results[35*cuIndex+35]));
}
for(int mode = 0; mode < 35; mode++) {
y_modes[35*cuIndex+mode] = (y_satd_results[35*cuIndex+mode] & 0xFF);
y_satd_results[35*cuIndex+mode] >>= 8;
if(luma_size > 4) {
cb_modes[35*cuIndex+mode] = (cb_satd_results[35*cuIndex+mode] & 0xFF);
cb_satd_results[35*cuIndex+mode] >>= 8;
cr_modes[35*cuIndex+mode] = (cr_satd_results[35*cuIndex+mode] & 0xFF);
cr_satd_results[35*cuIndex+mode] >>= 8;
}
}
}
#ifdef MODE_HIST
int ymode_hist[35], cbmode_hist[35], crmode_hist[35];
for(int l = 0; l < 35; l++) {
ymode_hist[l] = cbmode_hist[l] = crmode_hist[l] = 0;
}
for(int l = 0; l < (35*((param->sourceWidth/luma_size)*(param->sourceHeight/luma_size))); l += 35) { //+= 1 to make sure all modes are accounted for, += 35 for histogram of best modes
ymode_hist[y_modes[l]]++;
if(luma_size > 4) {
cbmode_hist[cb_modes[l]]++;
crmode_hist[cr_modes[l]]++;
}
}
printf("ymode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", ymode_hist[l]);
if(luma_size > 4) {
printf("\ncbmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", cbmode_hist[l]);
printf("\ncrmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", crmode_hist[l]);
}
printf("\n");
#endif
cur_result++;
}
}
return ret;
}
//TODO sort student results by satd result *and* mode number to make sure we have *exactly* the same bytes in
//both arrays, even if several modes have the same SATD value.
//We want to do the sort here so that students are not required to (it's not necessary in a real x265 use case).
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames) {
if(student == NULL) {
printf("Student result array pointer is NULL\n");
return false;
}
for(int i = 0; i < (4*num_frames); i++) {
int block_offset=35;
int b_s = 0;
for(int idx=0;idx<35;idx++)
{
//printf("\nSERIAL OFFSET: %d\n", block_offset+idx);
printf("Serial code : For mode: %u Ref value:%i\n",ref[b_s].y_modes[block_offset+idx], ref[b_s].y_satd_results[block_offset+idx]);
}
if(ref[i].luma_block_size != student[i].luma_block_size) {
printf("Ref result %d luma block size = %d, student = %d\n", i, ref[i].luma_block_size, student[i].luma_block_size);
return false;
}
if(ref[i].num_blocks != student[i].num_blocks) {
printf("Ref result %d num_blocks = %d, student = %d\n", i, ref[i].num_blocks, student[i].num_blocks);
return false;
}
if(memcmp(ref[i].y_modes, student[i].y_modes, 35*ref[i].num_blocks*sizeof(*ref[i].y_modes))) {
printf("Result %d, ref and student y_modes mismatched\n", i);
return false;
}
if(memcmp(ref[i].y_satd_results, student[i].y_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].y_satd_results))) {
printf("Result %d, ref and student y_satd_results mismatched\n", i);
return false;
}
if(ref[i].luma_block_size > 4) {
if(memcmp(ref[i].cb_modes, student[i].cb_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cb_modes))) {
printf("Result %d, ref and student cb_modes mismatched\n", i);
return false;
}
if(memcmp(ref[i].cb_satd_results, student[i].cb_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cb_satd_results))) {
printf("Result %d, ref and student cb_satd_results mismatched\n", i);
return false;
}
if(memcmp(ref[i].cr_modes, student[i].cr_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cr_modes))) {
printf("Result %d, ref and student cr_modes mismatched\n", i);
return false;
}
if(memcmp(ref[i].cr_satd_results, student[i].cr_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cr_satd_results))) {
printf("Result %d, ref and student cr_satd_results mismatched\n", i);
return false;
}
}
}
return true;
}
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames) {
//Fill in your own!
ece408_frame *imgs1 = imgs;
ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
ece408_intra_pred_result *cur_result = ret;
unsigned int debug_print = ((imgs->height+4-1)/4)*((imgs->width+4-1)/4);
printf("Number of 4x4 blocks in the frame (debug): %u\n", debug_print);
cudaError_t cuda_ret;
uint8_t *d_y,
*d_cr,
*d_cb;
unsigned int y_size = ((imgs->width) * (imgs->height)) * sizeof(uint8_t);
printf("\n Y SIZE : %u\n", y_size);
unsigned int cr_size,
cb_size;
// TODO: for I420 input each chroma plane holds y_size/4 samples, so y_size/2 per plane over-allocates;
// confirm how ece408_frame sizes its cb/cr buffers before relying on these copy sizes.
cr_size = cb_size = (y_size/2);
// Allocate global memory for the y, cr, and cb planes of the frame
cuda_ret = cudaMalloc((void **) &d_y, y_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMalloc((void **) &d_cr, cr_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMalloc((void **) &d_cb, cb_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaDeviceSynchronize();
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMemcpy(d_y, imgs1->y, y_size, cudaMemcpyHostToDevice);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMemcpy(d_cr, imgs1->cr, cr_size, cudaMemcpyHostToDevice);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMemcpy(d_cb, imgs1->cb, cb_size, cudaMemcpyHostToDevice);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Finished host-to-device copies of the frame planes\n");
cuda_ret = cudaDeviceSynchronize();
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
for(int i = 0; i < num_frames; i++) {
int res_count = 0;
//for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
for(int luma_size_shift = 2; luma_size_shift <=3; luma_size_shift++) {
int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
//cur_result->create(32, 32, luma_size);
cur_result->create(imgs1->width, imgs1->height, luma_size);
// Start
int32_t *d_res_y = NULL;
int32_t *d_res_cr = NULL; //chroma buffers stay NULL for 4x4 blocks, which have no chroma results
int32_t *d_res_cb = NULL;
uint8_t *d_y_modes = NULL;
uint8_t *d_cr_modes = NULL;
uint8_t *d_cb_modes = NULL;
//unsigned int y_res_size = (35 * (cur_result->num_blocks));
unsigned int num_blocks = ((imgs->height+luma_size-1)/luma_size)*((imgs->width+luma_size-1)/luma_size);
unsigned int y_res_size = 35*num_blocks*sizeof(int32_t);
unsigned int mode_size = 35*num_blocks*sizeof(uint8_t);
unsigned int cr_res_size,
cb_res_size;
printf("Number of SATD result entries (35 * num_blocks): %u\n", (unsigned int)(y_res_size/sizeof(int32_t)));
cr_res_size = cb_res_size = y_res_size;
// Allocate result in the device
cuda_ret = cudaMalloc((void **) &d_res_y, y_res_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMalloc((void **) &d_y_modes, mode_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if ( luma_size > 4 )
{
cuda_ret = cudaMalloc((void **) &d_res_cr, cr_res_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMalloc((void **) &d_res_cb, cb_res_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMalloc((void **) &d_cr_modes, mode_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMalloc((void **) &d_cb_modes, mode_size);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
cuda_ret = cudaDeviceSynchronize();
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Grid dimension
dim3 dimGrid = dim3((int)ceil((imgs->width)/(float)luma_size), (int)ceil((imgs->height)/(float)luma_size), 1);
// Block dimension
dim3 dimBlock = dim3(luma_size, luma_size, 1);
//int neighbour_array_size = luma_size*2+1;
printf("\n KERNEL CONFIG: %d %d %d %d\n", dimGrid.x, dimGrid.y, dimBlock.x, dimBlock.y);
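// One thread block per luma prediction block (PB): the grid tiles the frame in
// luma_size x luma_size blocks, one thread per pixel of the PB. The kernel
// (defined elsewhere in this project) is expected to fill, for each PB, the SATD
// costs and mode identifiers for all 35 HEVC intra prediction modes.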
hevcPredictionKernel<<<dimGrid, dimBlock>>>(d_y, d_cr, d_cb, d_res_y, d_res_cr, d_res_cb, d_y_modes, d_cr_modes, d_cb_modes, imgs->height, imgs->width);
cuda_ret = cudaDeviceSynchronize();
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("current result num_block_size is %u\n", num_blocks);
printf("from serial code num_block is %d\n",cur_result->num_blocks);
cuda_ret = cudaMemcpy(cur_result->y_satd_results, d_res_y, y_res_size, cudaMemcpyDeviceToHost);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
/*
cuda_ret = cudaMemcpy(cur_result->cr_satd_results, d_res_cr, cr_res_size, cudaMemcpyDeviceToHost);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMemcpy(cur_result->cb_satd_results, d_res_cb, cb_res_size, cudaMemcpyDeviceToHost);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
*/
cuda_ret = cudaMemcpy(cur_result->y_modes, d_y_modes,mode_size, cudaMemcpyDeviceToHost);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
/*
cuda_ret = cudaMemcpy(cur_result->cr_modes, d_cr_modes, cr_res_size, cudaMemcpyDeviceToHost);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cuda_ret = cudaMemcpy(cur_result->cb_modes, d_cb_modes, cb_res_size, cudaMemcpyDeviceToHost);
if ( cuda_ret != cudaSuccess )
{
printf("\n%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
*/
cur_result++;
res_count++;
}
}
return ret;
}
|
b0d30a02250479b58cb20444a5e19f15eb56fbbe.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of the cube - ica/cuda - software package
// Copyright © 2010-2013 Christian Kellner <[email protected]>
// Copyright © 1999 Michael S. Lewicki and CMU
// Part of the code is based on code written by Mike Lewicki
//
// License: MIT (see LICENSE.BSD-MIT)
#include "cube.h"
#include "cube_blas.h"
#include "cube_matrix.h"
#include "cube_ica_kernels.h"
#include "cube_private.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__device__ double k_dsign (double v)
{
if (v > 0)
return 1;
else if (v < 0)
return -1;
else
return 0;
}
typedef struct _exp_param
{
double *y;
int n;
double mu;
double sigma;
double a;
double b;
double *sum;
int bs;
} exp_param;
__device__ double
sumpower_dev (const double *x,
int n,
double p,
double mu,
double sigma,
double *sum,
int bs)
{
int i, off;
// zero out shared mem so we can sum on it directly
sum[threadIdx.x] = 0;
__syncthreads (); // necessary?
for (i = 0; i < bs; i++)
{
int block = threadIdx.x*bs;
int col = block+i;
double u;
if (!(col < n))
break;
u = pow (fabs ((x[col] - mu) / sigma), p);
sum[threadIdx.x] += u;
}
// b-tree result calculation
for (off = 1; off < blockDim.x; off = off << 1)
{
__syncthreads ();
if (threadIdx.x < (blockDim.x/(off*2)))
{
int off_x = (threadIdx.x * off * 2);
int off_y = off_x + off;
sum[off_x] += sum[off_y];
}
}
// write memory back to device memory
__syncthreads (); // not sure that is needed either
// sum[0] will hold the result!
return sum[0];
}
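/* Log-posterior of the shape parameter beta of the exponential power
   (generalized Gaussian) distribution: the exp-power log-likelihood of the
   data y plus a gamma(a, b) prior term on (1 + beta). Used as the objective
   for the per-component prior adaptation below. */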
__device__ double
exp_pwr_l_beta (double beta, const exp_param params)
{
double *y;
double mu, sigma, a, b;
double c, logw, uas, l;
double p;
int n;
y = params.y;
n = params.n;
mu = params.mu;
sigma = params.sigma;
a = params.a;
b = params.b;
p = 2.0/(1+beta);
c = pow ((tgamma(3.0/p)/tgamma(1/p)), (p/2.0));
logw = 0.5 * lgamma (3.0/p) - 1.5 * lgamma (1/p) - log (1+beta);
uas = sumpower_dev (y, n, p, mu, sigma, params.sum, params.bs);
l = (-1*lgamma(a)) - (a*log(b)) + ((a-1.0)*log(1.0+beta)) +
n*logw - n*log(sigma) - ((1.0+beta)/b) - (c * uas);
return l;
}
__device__ double
exp_pwrlbeta_fmin (double x, const exp_param fparams)
{
double l, beta;
beta = exp (x) - 1;
l = -1 * exp_pwr_l_beta (beta, fparams);
return l;
}
/* *************************************************************************** */
// Adapted from SciPy's fminbound function
// in SciPy's optimize module (cf. /scipy/optimize/optimize.py)
// Its license notice:
// ******NOTICE***************
// optimize.py module by Travis E. Oliphant
//
// You may copy and use this module as you see fit with no
// guarantee implied provided you keep this notice in all copies.
// *****END NOTICE************
#define mean_2(a,b) 0.5*(a+b)
/* No input checking; the caller must ensure lower < upper */
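/* Bounded scalar minimization of exp_pwrlbeta_fmin on [lower, upper] using
   golden-section search with successive parabolic interpolation, following
   SciPy's fminbound (see notice above). Returns the abscissa of the minimum. */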
__device__ double
f_min_bound (double lower, double upper, double tol, exp_param fparams)
{
double tol1, tol2;
int maxfun, maxiter;
double a, b, d, e;
double fluc, ffluc, nfc, fnfc, x, xf, fx, xm;
int count, iter;
const double seps = 1.4901e-08; // worth calculating?
const double golden = 0.5 * (3.0 - sqrt (5.0));
count = 0.0;
iter = 0;
maxiter = maxfun = 500;
a = lower;
b = upper;
fluc = nfc = x = xf = a + golden * (b - a);
d = e = 0.0;
fx = exp_pwrlbeta_fmin(x, fparams);
ffluc = fnfc = fx;
xm = mean_2(a,b); //0.5 * (a + b);
tol1 = seps * fabs(xf) + tol/3.0;
tol2 = 2.0 * tol1;
count++;
while (fabs (xf-xm) > (tol2 - 0.5*(b-a)))
{
double r, q, p, si, fu;
bool do_gs = true;
if (fabs(e) > tol1)
{
r = (xf - nfc) * (fx - ffluc);
q = (xf - fluc) * (fx - fnfc);
p = (xf - fluc) * q - (xf - nfc) * r;
q = 2.0 * (q - r);
if (q > 0.0)
p = -p;
q = fabs(q);
r = e; e = d;
if ((fabs(p) < fabs(0.5 * q * r)) &&
(p > q * (a - xf)) &&
(p < q * (b - xf)))
{
d = p/q;
x = xf + d;
if (((x - a) < tol2) || ((b - x) < tol2))
{
si = k_dsign (xm - xf) + ((xm - xf) == 0);
d = tol1 * si;
}
do_gs = false;
}
}
if (do_gs)
{
e = xf >= xm ? a - xf : b - xf;
d = golden * e;
}
si = k_dsign(d) + (d == 0);
x = xf + si * fmax (fabs(d), tol1);
fu = exp_pwrlbeta_fmin(x, fparams);
count++;
iter++;
if (fu <= fx)
{
(x >= xf ? a : b) = xf;
fluc = nfc; ffluc = fnfc;
nfc = xf; fnfc = fx;
xf = x; fx = fu;
}
else // fu > fx
{
(x < xf ? a : b) = x;
if ((fu <= fnfc) || (nfc == xf))
{
fluc = nfc; ffluc = fnfc;
nfc = x; fnfc = fu;
}
else if ((fu <= ffluc) || (fluc == xf) || (fluc == nfc))
{
fluc = x; ffluc = fu;
}
}
xm = mean_2(a,b);
tol1 = seps * fabs(xf) + tol/3.0;
tol2 = 2.0 * tol1;
if (count > maxfun || iter > maxiter)
break;
}
return xf;
}
/* x, y are memory references (C layout, row-major),
m, n are matrix dimensions,
(col-major means m == y and n == x) */
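/* One thread block per column of `in` (one component of length m). The block
   stages its column in shared memory, then all threads run the same bounded 1-D
   search over x = log(1 + beta), cooperating in the reductions; thread 0 writes
   beta = exp(x) - 1 for its column to `out`. */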
__global__ void
adapt_prior_kernel (const double *in,
const int m,
const int bs,
const double ax,
const double bx,
const double a,
const double b,
double *out)
{
exp_param fparams;
extern __shared__ double data[];
int i;
double res;
// read data from global memory
for (i = 0; i < bs; i++)
{
int x = threadIdx.x * bs + i;
int y = blockIdx.y * m;
if (x < m)
data[x] = in[y + x];
}
// now the calculation
fparams.y = &data[0];
fparams.n = m;
fparams.bs = bs;
fparams.sum = &data[m];
fparams.a = a;
fparams.b = b;
fparams.mu = 0.0;
fparams.sigma = 1.0;
res = f_min_bound (ax, bx, 0.1, fparams);
if (threadIdx.x == 0)
out[blockIdx.y] = exp(res) - 1;
}
int
gpu_adapt_prior_host (cube_t *ctx, const double *in, int m, int n, double mu, double sigma, double tol, double a, double b, double *beta)
{
hipError_t r;
double *devp, *out;
double betamin, betamax;
double xmin, xmax;
dim3 grid, block;
size_t smem;
int bs;
if (! cube_context_check (ctx))
return -1;
betamin = -0.9;
betamax = 20.0;
xmin = log (1 + betamin);
xmax = log (1 + betamax);
out = (double *) cube_malloc_device (ctx, sizeof (double) * n);
devp = (double *) cube_malloc_device (ctx, sizeof (double) * n * m);
cube_memcpy (ctx, devp, (void *) in, sizeof (double) * n * m, CMK_HOST_2_DEVICE);
grid.y = n;
block.x = 512;
smem = (block.x + m) * sizeof (double);
bs = ceil ((double) m / block.x);
hipLaunchKernelGGL(( adapt_prior_kernel), dim3(grid), dim3(block), smem, 0, devp, m, bs, xmin, xmax, a, b, out);
r = hipPeekAtLastError ();
cube_cuda_check (ctx, r);
cube_memcpy (ctx, beta, out, sizeof (double) * n, CMK_DEVICE_2_HOST);
cube_free_device (ctx, devp);
cube_free_device (ctx, out);
return cube_cuda_check (ctx, r);
}
int
gpu_adapt_prior (cube_t *ctx, const double *in, int m, int n, double mu, double sigma, double tol, double a, double b, double *beta)
{
hipError_t r;
double betamin, betamax;
double xmin, xmax;
dim3 grid, block;
size_t smem;
int bs;
if (! cube_context_check (ctx))
return -1;
betamin = -0.9;
betamax = 20.0;
xmin = log (1 + betamin);
xmax = log (1 + betamax);
grid.y = n;
block.x = 512;
smem = (block.x + m) * sizeof (double);
bs = ceil ((double) m / block.x);
hipLaunchKernelGGL(( adapt_prior_kernel), dim3(grid), dim3(block), smem, 0, in, m, bs, xmin, xmax, a, b, beta);
r = hipPeekAtLastError ();
return cube_cuda_check (ctx, r);
}
__global__ void
sumpower_kernel (const double *in, int n, int bs, double p, double *out)
{
int tid;
extern __shared__ double data[];
double *sum;
int i;
tid = threadIdx.x;
// read data from global memory
for (i = 0; i < bs; i++)
{
int col = tid*bs+i;
if (col < n)
data[col] = in[col];
}
sum = &data[n];
sumpower_dev (data, n, p, 0, 1, sum, bs);
if (tid == 0)
out[blockIdx.x] = sum[0];
}
double
gpu_sumpower (cube_t *ctx, const double *in, int n, double p)
{
hipError_t r;
double *devp, res, *out;
dim3 grid, block;
size_t smem;
int bs;
if (! cube_context_check (ctx))
return -1;
out = (double *) cube_host_register (ctx, &res, sizeof (res));
devp = (double *) cube_malloc_device (ctx, sizeof (double) * n);
cube_memcpy (ctx, devp, (void *) in, sizeof (double) * n, CMK_HOST_2_DEVICE);
block.x = 512;
smem = (block.x + n) * sizeof (double);
bs = ceil ((double) n / block.x);
hipLaunchKernelGGL(( sumpower_kernel), dim3(grid), dim3(block), smem, 0, devp, n, bs, p, out);
r = hipPeekAtLastError ();
cube_cuda_check (ctx, r);
cube_host_unregister (ctx, &res);
return res;
}
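/* Element-wise score function of the exponential power prior:
   Z = d/ds log p(s) evaluated at S (after subtracting mu), with the per-row
   parameters mu, beta, sigma staged in shared memory. */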
__global__ void
kernel_calc_z (const double *S_g,
int m,
int n,
const double *mu_g,
const double *beta_g,
const double *sigma_g,
double *Z)
{
extern __shared__ double smem[];
double mu, beta, sigma;
double *mu_s, *beta_s, *sigma_s, *S_s, *Z_s;
double s, q, c, z;
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.x;
global_y = (blockDim.y * blockIdx.y) + threadIdx.y;
if (global_x > n)
return;
gid = (n * global_y) + global_x;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
mu_s = &smem[0];
beta_s = &smem[blockDim.x];
sigma_s = &smem[2*blockDim.x];
S_s = &smem[3 * blockDim.x];
Z_s = &smem[(3 + blockDim.y) *blockDim.x];
mu_s[threadIdx.y] = mu_g[global_y];
beta_s[threadIdx.y] = beta_g[global_y];
sigma_s[threadIdx.y] = sigma_g[global_y];
S_s[lid] = S_g[gid];
__syncthreads();
if (global_y > m)
return;
mu = mu_s[threadIdx.y];
beta = beta_s[threadIdx.y];
sigma = sigma_s[threadIdx.y];
s = S_s[lid];
/* do the computation */
s -= mu;
q = (2.0/(1.0+beta));
c = pow ((tgamma(3.0/q)/tgamma(1.0/q)), (q/2.0));
z = -1 * (q*c/pow (sigma,q)) * pow (abs (s), q-1.0) * k_dsign (s);
Z_s[lid] = z;
__syncthreads();
Z[gid] = Z_s[lid];
}
int
cube_gpu_calc_Z (cube_t *ctx,
cube_matrix_t *S,
cube_matrix_t *Z,
cube_matrix_t *mu,
cube_matrix_t *beta,
cube_matrix_t *sigma)
{
hipError_t res;
double *devS, *devZ, *devmu, *devbeta, *devsigma;
dim3 grid, block;
int m, n;
size_t smem;
if (! cube_context_check (ctx))
return cube_context_check (ctx);
m = cube_matrix_get_m (Z);
n = cube_matrix_get_n (Z);
block.x = 16;
block.y = 16;
grid.x = ceil (n / (double) block.x);
grid.y = ceil (m / (double) block.y);
smem = block.y * sizeof (double) * (3 + 2*block.x);
devS = (double *) S->dev_ptr;
devZ = (double *) Z->dev_ptr;
devmu = (double *) mu->dev_ptr;
devbeta = (double *) beta->dev_ptr;
devsigma = (double *) sigma->dev_ptr;
hipLaunchKernelGGL(( kernel_calc_z), dim3(grid), dim3(block), smem, 0, devS, m, n, devmu, devbeta, devsigma, devZ);
res = hipPeekAtLastError ();
return cube_cuda_check (ctx, res);
}
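/* Scaled gradient step on the mixing matrix:
   A += (epsilon / max|dA|) * dA, where *iamax is the 1-based (BLAS-style)
   index of the element of dA with the largest absolute value. */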
__global__ void
update_AdA_kernel (double *A,
const double *dA,
int m,
int n,
const double *epsilon,
const int *iamax)
{
double max;
const double eps = *epsilon;
extern __shared__ double smem[];
double *dA_data;
double *A_data;
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.x;
global_y = (blockDim.y * blockIdx.y) + threadIdx.y;
/* see if we are inside the boundaries */
if (global_x > n || global_y > m)
return;
gid = (n * global_y) + global_x;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
/* set up shared memory addresses */
A_data = &smem[0];
dA_data = &smem[blockDim.x * blockDim.y];
dA_data[lid] = dA[gid];
A_data[lid] = A[gid];
__syncthreads();
/* do the computation */
max = fabs(dA[*iamax - 1]); /* global read, but LDU hopefully (FIXME, not sure) */
A_data[lid] += (eps / max) * dA_data[lid];
/* write result back */
__syncthreads();
A[gid] = A_data[lid];
}
void
gpu_update_A_with_delta_A (cube_t *ctx,
cube_matrix_t *A,
cube_matrix_t *dA,
const double *epsilon,
const int *iamax)
{
hipError_t res;
double *devA, *devdA;
dim3 grid, block;
size_t smem;
int m, n;
if (! cube_context_check (ctx))
return;
m = A->m;
n = A->n;
block.x = 16;
block.y = 16;
grid.x = ceil (n / (double) block.x);
grid.y = ceil (m / (double) block.y);
block.z = grid.z = 1;
smem = 2 * block.x * block.y * sizeof (double);
devA = (double *) A->dev_ptr;
devdA = (double *) dA->dev_ptr;
//printf ("%u, %u, %zu\n", grid.x, grid.y, smem);
hipLaunchKernelGGL(( update_AdA_kernel), dim3(grid), dim3(block), smem, 0, devA, devdA, m, n, epsilon, iamax);
res = hipPeekAtLastError ();
cube_cuda_check (ctx, res);
}
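/* Copies the transpose of S (m x n, leading dimension lds) into Sp starting at
   row `index` (leading dimension ldsp), one shared-memory tile per block. */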
__global__
void k_collect_prior (double *S, int m, int n, int lds, double *Sp, int ldsp, int index)
{
extern __shared__ double smem[];
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.y; //n
global_y = (blockDim.y * blockIdx.y) + threadIdx.x; //m
gid = (lds * global_x) + global_y;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
/* read memory from S and write to smem */
/* see if we are inside the boundaries */
if (global_x < n && global_y < m)
smem[lid] = S[gid];
__syncthreads();
/* recalculate gid, lid and write to global memory */
global_x = (blockDim.x * blockIdx.y) + threadIdx.y;
global_y = (blockDim.y * blockIdx.x) + threadIdx.x;
gid = (ldsp * global_x) + index + global_y;
lid = (threadIdx.x * blockDim.x) + threadIdx.y;
if (global_x < m && global_y < (ldsp - index))
Sp[gid] = smem[lid];
}
void
gpu_collect_prior (cube_t *ctx,
cube_matrix_t *S,
cube_matrix_t *priorS,
int index)
{
hipError_t res;
double *devS, *devSp;
dim3 grid, block;
int m, n, lds, ldsp;
size_t smem;
if (! cube_context_check (ctx))
return;
m = cube_matrix_get_m (S);
n = cube_matrix_get_n (S);
lds = m;
ldsp = cube_matrix_get_m (priorS);
block.x = 16;
block.y = 16;
grid.x = ceil (n / (double) block.x);
grid.y = ceil (m / (double) block.y);
smem = block.x * block.y * sizeof (double);
devS = (double *) S->dev_ptr;
devSp = (double *) priorS->dev_ptr;
hipLaunchKernelGGL(( k_collect_prior), dim3(grid), dim3(block), smem, 0, devS, m, n, lds, devSp, ldsp, index);
res = hipPeekAtLastError ();
cube_cuda_check (ctx, res);
}
| b0d30a02250479b58cb20444a5e19f15eb56fbbe.cu | // This file is part of the cube - ica/cuda - software package
// Copyright © 2010-2013 Christian Kellner <[email protected]>
// Copyright © 1999 Michael S. Lewicki and CMU
// Part of the code is based on code written by Mike Lewicki
//
// License: MIT (see LICENSE.BSD-MIT)
#include "cube.h"
#include "cube_blas.h"
#include "cube_matrix.h"
#include "cube_ica_kernels.h"
#include "cube_private.h"
#include <cuda.h>
#include <stdio.h>
__device__ double k_dsign (double v)
{
if (v > 0)
return 1;
else if (v < 0)
return -1;
else
return 0;
}
typedef struct _exp_param
{
double *y;
int n;
double mu;
double sigma;
double a;
double b;
double *sum;
int bs;
} exp_param;
__device__ double
sumpower_dev (const double *x,
int n,
double p,
double mu,
double sigma,
double *sum,
int bs)
{
int i, off;
// zero out shared mem so we can sum on it directly
sum[threadIdx.x] = 0;
__syncthreads (); // necessary?
for (i = 0; i < bs; i++)
{
int block = threadIdx.x*bs;
int col = block+i;
double u;
if (!(col < n))
break;
u = pow (fabs ((x[col] - mu) / sigma), p);
sum[threadIdx.x] += u;
}
// b-tree result calculation
for (off = 1; off < blockDim.x; off = off << 1)
{
__syncthreads ();
if (threadIdx.x < (blockDim.x/(off*2)))
{
int off_x = (threadIdx.x * off * 2);
int off_y = off_x + off;
sum[off_x] += sum[off_y];
}
}
// write memory back to device memory
__syncthreads (); // not sure that is needed either
// sum[0] will hold the result!
return sum[0];
}
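/* Log-posterior of the shape parameter beta of the exponential power
   (generalized Gaussian) distribution: the exp-power log-likelihood of the
   data y plus a gamma(a, b) prior term on (1 + beta). Used as the objective
   for the per-component prior adaptation below. */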
__device__ double
exp_pwr_l_beta (double beta, const exp_param params)
{
double *y;
double mu, sigma, a, b;
double c, logw, uas, l;
double p;
int n;
y = params.y;
n = params.n;
mu = params.mu;
sigma = params.sigma;
a = params.a;
b = params.b;
p = 2.0/(1+beta);
c = pow ((tgamma(3.0/p)/tgamma(1/p)), (p/2.0));
logw = 0.5 * lgamma (3.0/p) - 1.5 * lgamma (1/p) - log (1+beta);
uas = sumpower_dev (y, n, p, mu, sigma, params.sum, params.bs);
l = (-1*lgamma(a)) - (a*log(b)) + ((a-1.0)*log(1.0+beta)) +
n*logw - n*log(sigma) - ((1.0+beta)/b) - (c * uas);
return l;
}
__device__ double
exp_pwrlbeta_fmin (double x, const exp_param fparams)
{
double l, beta;
beta = exp (x) - 1;
l = -1 * exp_pwr_l_beta (beta, fparams);
return l;
}
/* *************************************************************************** */
// Adapted from SciPy's fminbound function
// in SciPy's optimize module (cf. /scipy/optimize/optimize.py)
// Its license notice:
// ******NOTICE***************
// optimize.py module by Travis E. Oliphant
//
// You may copy and use this module as you see fit with no
// guarantee implied provided you keep this notice in all copies.
// *****END NOTICE************
#define mean_2(a,b) 0.5*(a+b)
/* No input checking; the caller must ensure lower < upper */
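/* Bounded scalar minimization of exp_pwrlbeta_fmin on [lower, upper] using
   golden-section search with successive parabolic interpolation, following
   SciPy's fminbound (see notice above). Returns the abscissa of the minimum. */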
__device__ double
f_min_bound (double lower, double upper, double tol, exp_param fparams)
{
double tol1, tol2;
int maxfun, maxiter;
double a, b, d, e;
double fluc, ffluc, nfc, fnfc, x, xf, fx, xm;
int count, iter;
const double seps = 1.4901e-08; // worth calculating?
const double golden = 0.5 * (3.0 - sqrt (5.0));
count = 0.0;
iter = 0;
maxiter = maxfun = 500;
a = lower;
b = upper;
fluc = nfc = x = xf = a + golden * (b - a);
d = e = 0.0;
fx = exp_pwrlbeta_fmin(x, fparams);
ffluc = fnfc = fx;
xm = mean_2(a,b); //0.5 * (a + b);
tol1 = seps * fabs(xf) + tol/3.0;
tol2 = 2.0 * tol1;
count++;
while (fabs (xf-xm) > (tol2 - 0.5*(b-a)))
{
double r, q, p, si, fu;
bool do_gs = true;
if (fabs(e) > tol1)
{
r = (xf - nfc) * (fx - ffluc);
q = (xf - fluc) * (fx - fnfc);
p = (xf - fluc) * q - (xf - nfc) * r;
q = 2.0 * (q - r);
if (q > 0.0)
p = -p;
q = fabs(q);
r = e; e = d;
if ((fabs(p) < fabs(0.5 * q * r)) &&
(p > q * (a - xf)) &&
(p < q * (b - xf)))
{
d = p/q;
x = xf + d;
if (((x - a) < tol2) || ((b - x) < tol2))
{
si = k_dsign (xm - xf) + ((xm - xf) == 0);
d = tol1 * si;
}
do_gs = false;
}
}
if (do_gs)
{
e = xf >= xm ? a - xf : b - xf;
d = golden * e;
}
si = k_dsign(d) + (d == 0);
x = xf + si * fmax (fabs(d), tol1);
fu = exp_pwrlbeta_fmin(x, fparams);
count++;
iter++;
if (fu <= fx)
{
(x >= xf ? a : b) = xf;
fluc = nfc; ffluc = fnfc;
nfc = xf; fnfc = fx;
xf = x; fx = fu;
}
else // fu > fx
{
(x < xf ? a : b) = x;
if ((fu <= fnfc) || (nfc == xf))
{
fluc = nfc; ffluc = fnfc;
nfc = x; fnfc = fu;
}
else if ((fu <= ffluc) || (fluc == xf) || (fluc == nfc))
{
fluc = x; ffluc = fu;
}
}
xm = mean_2(a,b);
tol1 = seps * fabs(xf) + tol/3.0;
tol2 = 2.0 * tol1;
if (count > maxfun || iter > maxiter)
break;
}
return xf;
}
/* x, y are memory references (C layout, row-major),
m, n are matrix dimensions,
(col-major means m == y and n == x) */
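/* One thread block per column of `in` (one component of length m). The block
   stages its column in shared memory, then all threads run the same bounded 1-D
   search over x = log(1 + beta), cooperating in the reductions; thread 0 writes
   beta = exp(x) - 1 for its column to `out`. */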
__global__ void
adapt_prior_kernel (const double *in,
const int m,
const int bs,
const double ax,
const double bx,
const double a,
const double b,
double *out)
{
exp_param fparams;
extern __shared__ double data[];
int i;
double res;
// read data from global memory
for (i = 0; i < bs; i++)
{
int x = threadIdx.x * bs + i;
int y = blockIdx.y * m;
if (x < m)
data[x] = in[y + x];
}
// now the calculation
fparams.y = &data[0];
fparams.n = m;
fparams.bs = bs;
fparams.sum = &data[m];
fparams.a = a;
fparams.b = b;
fparams.mu = 0.0;
fparams.sigma = 1.0;
res = f_min_bound (ax, bx, 0.1, fparams);
if (threadIdx.x == 0)
out[blockIdx.y] = exp(res) - 1;
}
int
gpu_adapt_prior_host (cube_t *ctx, const double *in, int m, int n, double mu, double sigma, double tol, double a, double b, double *beta)
{
cudaError_t r;
double *devp, *out;
double betamin, betamax;
double xmin, xmax;
dim3 grid, block;
size_t smem;
int bs;
if (! cube_context_check (ctx))
return -1;
betamin = -0.9;
betamax = 20.0;
xmin = log (1 + betamin);
xmax = log (1 + betamax);
out = (double *) cube_malloc_device (ctx, sizeof (double) * n);
devp = (double *) cube_malloc_device (ctx, sizeof (double) * n * m);
cube_memcpy (ctx, devp, (void *) in, sizeof (double) * n * m, CMK_HOST_2_DEVICE);
grid.y = n;
block.x = 512;
smem = (block.x + m) * sizeof (double);
bs = ceil ((double) m / block.x);
adapt_prior_kernel<<<grid, block, smem>>>(devp, m, bs, xmin, xmax, a, b, out);
r = cudaPeekAtLastError ();
cube_cuda_check (ctx, r);
cube_memcpy (ctx, beta, out, sizeof (double) * n, CMK_DEVICE_2_HOST);
cube_free_device (ctx, devp);
cube_free_device (ctx, out);
return cube_cuda_check (ctx, r);
}
int
gpu_adapt_prior (cube_t *ctx, const double *in, int m, int n, double mu, double sigma, double tol, double a, double b, double *beta)
{
cudaError_t r;
double betamin, betamax;
double xmin, xmax;
dim3 grid, block;
size_t smem;
int bs;
if (! cube_context_check (ctx))
return -1;
betamin = -0.9;
betamax = 20.0;
xmin = log (1 + betamin);
xmax = log (1 + betamax);
grid.y = n;
block.x = 512;
smem = (block.x + m) * sizeof (double);
bs = ceil ((double) m / block.x);
adapt_prior_kernel<<<grid, block, smem>>>(in, m, bs, xmin, xmax, a, b, beta);
r = cudaPeekAtLastError ();
return cube_cuda_check (ctx, r);
}
__global__ void
sumpower_kernel (const double *in, int n, int bs, double p, double *out)
{
int tid;
extern __shared__ double data[];
double *sum;
int i;
tid = threadIdx.x;
// read data from global memory
for (i = 0; i < bs; i++)
{
int col = tid*bs+i;
if (col < n)
data[col] = in[col];
}
sum = &data[n];
sumpower_dev (data, n, p, 0, 1, sum, bs);
if (tid == 0)
out[blockIdx.x] = sum[0];
}
double
gpu_sumpower (cube_t *ctx, const double *in, int n, double p)
{
cudaError_t r;
double *devp, res, *out;
dim3 grid, block;
size_t smem;
int bs;
if (! cube_context_check (ctx))
return -1;
out = (double *) cube_host_register (ctx, &res, sizeof (res));
devp = (double *) cube_malloc_device (ctx, sizeof (double) * n);
cube_memcpy (ctx, devp, (void *) in, sizeof (double) * n, CMK_HOST_2_DEVICE);
block.x = 512;
smem = (block.x + n) * sizeof (double);
bs = ceil ((double) n / block.x);
sumpower_kernel<<<grid, block, smem>>>(devp, n, bs, p, out);
r = cudaPeekAtLastError ();
cube_cuda_check (ctx, r);
cube_host_unregister (ctx, &res);
return res;
}
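/* Element-wise score function of the exponential power prior:
   Z = d/ds log p(s) evaluated at S (after subtracting mu), with the per-row
   parameters mu, beta, sigma staged in shared memory. */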
__global__ void
kernel_calc_z (const double *S_g,
int m,
int n,
const double *mu_g,
const double *beta_g,
const double *sigma_g,
double *Z)
{
extern __shared__ double smem[];
double mu, beta, sigma;
double *mu_s, *beta_s, *sigma_s, *S_s, *Z_s;
double s, q, c, z;
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.x;
global_y = (blockDim.y * blockIdx.y) + threadIdx.y;
if (global_x > n)
return;
gid = (n * global_y) + global_x;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
mu_s = &smem[0];
beta_s = &smem[blockDim.x];
sigma_s = &smem[2*blockDim.x];
S_s = &smem[3 * blockDim.x];
Z_s = &smem[(3 + blockDim.y) *blockDim.x];
mu_s[threadIdx.y] = mu_g[global_y];
beta_s[threadIdx.y] = beta_g[global_y];
sigma_s[threadIdx.y] = sigma_g[global_y];
S_s[lid] = S_g[gid];
__syncthreads();
if (global_y > m)
return;
mu = mu_s[threadIdx.y];
beta = beta_s[threadIdx.y];
sigma = sigma_s[threadIdx.y];
s = S_s[lid];
/* do the computation */
s -= mu;
q = (2.0/(1.0+beta));
c = pow ((tgamma(3.0/q)/tgamma(1.0/q)), (q/2.0));
z = -1 * (q*c/pow (sigma,q)) * pow (abs (s), q-1.0) * k_dsign (s);
Z_s[lid] = z;
__syncthreads();
Z[gid] = Z_s[lid];
}
int
cube_gpu_calc_Z (cube_t *ctx,
cube_matrix_t *S,
cube_matrix_t *Z,
cube_matrix_t *mu,
cube_matrix_t *beta,
cube_matrix_t *sigma)
{
cudaError_t res;
double *devS, *devZ, *devmu, *devbeta, *devsigma;
dim3 grid, block;
int m, n;
size_t smem;
if (! cube_context_check (ctx))
return cube_context_check (ctx);
m = cube_matrix_get_m (Z);
n = cube_matrix_get_n (Z);
block.x = 16;
block.y = 16;
grid.x = ceil (n / (double) block.x);
grid.y = ceil (m / (double) block.y);
smem = block.y * sizeof (double) * (3 + 2*block.x);
devS = (double *) S->dev_ptr;
devZ = (double *) Z->dev_ptr;
devmu = (double *) mu->dev_ptr;
devbeta = (double *) beta->dev_ptr;
devsigma = (double *) sigma->dev_ptr;
kernel_calc_z<<<grid, block, smem>>>(devS, m, n, devmu, devbeta, devsigma, devZ);
res = cudaPeekAtLastError ();
return cube_cuda_check (ctx, res);
}
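/* Scaled gradient step on the mixing matrix:
   A += (epsilon / max|dA|) * dA, where *iamax is the 1-based (BLAS-style)
   index of the element of dA with the largest absolute value. */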
__global__ void
update_AdA_kernel (double *A,
const double *dA,
int m,
int n,
const double *epsilon,
const int *iamax)
{
double max;
const double eps = *epsilon;
extern __shared__ double smem[];
double *dA_data;
double *A_data;
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.x;
global_y = (blockDim.y * blockIdx.y) + threadIdx.y;
/* see if we are inside the boundaries */
if (global_x > n || global_y > m)
return;
gid = (n * global_y) + global_x;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
/* set up shared memory addresses */
A_data = &smem[0];
dA_data = &smem[blockDim.x * blockDim.y];
dA_data[lid] = dA[gid];
A_data[lid] = A[gid];
__syncthreads();
/* do the computation */
max = fabs(dA[*iamax - 1]); /* global read, but LDU hopefully (FIXME, not sure) */
A_data[lid] += (eps / max) * dA_data[lid];
/* write result back */
__syncthreads();
A[gid] = A_data[lid];
}
void
gpu_update_A_with_delta_A (cube_t *ctx,
cube_matrix_t *A,
cube_matrix_t *dA,
const double *epsilon,
const int *iamax)
{
cudaError_t res;
double *devA, *devdA;
dim3 grid, block;
size_t smem;
int m, n;
if (! cube_context_check (ctx))
return;
m = A->m;
n = A->n;
block.x = 16;
block.y = 16;
grid.x = ceil (n / (double) block.x);
grid.y = ceil (m / (double) block.y);
block.z = grid.z = 1;
smem = 2 * block.x * block.y * sizeof (double);
devA = (double *) A->dev_ptr;
devdA = (double *) dA->dev_ptr;
//printf ("%u, %u, %zu\n", grid.x, grid.y, smem);
update_AdA_kernel<<<grid, block, smem>>>(devA, devdA, m, n, epsilon, iamax);
res = cudaPeekAtLastError ();
cube_cuda_check (ctx, res);
}
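/* Copies the transpose of S (m x n, leading dimension lds) into Sp starting at
   row `index` (leading dimension ldsp), one shared-memory tile per block. */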
__global__
void k_collect_prior (double *S, int m, int n, int lds, double *Sp, int ldsp, int index)
{
extern __shared__ double smem[];
int global_x, global_y, lid, gid;
/* calculate global and local ids */
global_x = (blockDim.x * blockIdx.x) + threadIdx.y; //n
global_y = (blockDim.y * blockIdx.y) + threadIdx.x; //m
gid = (lds * global_x) + global_y;
lid = (threadIdx.y * blockDim.x) + threadIdx.x;
/* read memory from S and write to smem */
/* see if we are inside the boundaries */
if (global_x < n && global_y < m)
smem[lid] = S[gid];
__syncthreads();
/* recalculate gid, lid and write to global memory */
global_x = (blockDim.x * blockIdx.y) + threadIdx.y;
global_y = (blockDim.y * blockIdx.x) + threadIdx.x;
gid = (ldsp * global_x) + index + global_y;
lid = (threadIdx.x * blockDim.x) + threadIdx.y;
if (global_x < m && global_y < (ldsp - index))
Sp[gid] = smem[lid];
}
void
gpu_collect_prior (cube_t *ctx,
cube_matrix_t *S,
cube_matrix_t *priorS,
int index)
{
cudaError_t res;
double *devS, *devSp;
dim3 grid, block;
int m, n, lds, ldsp;
size_t smem;
if (! cube_context_check (ctx))
return;
m = cube_matrix_get_m (S);
n = cube_matrix_get_n (S);
lds = m;
ldsp = cube_matrix_get_m (priorS);
block.x = 16;
block.y = 16;
grid.x = ceil (n / (double) block.x);
grid.y = ceil (m / (double) block.y);
smem = block.x * block.y * sizeof (double);
devS = (double *) S->dev_ptr;
devSp = (double *) priorS->dev_ptr;
k_collect_prior<<<grid, block, smem>>>(devS, m, n, lds, devSp, ldsp, index);
res = cudaPeekAtLastError ();
cube_cuda_check (ctx, res);
}
|
d60c39f39e15f18bd333e96004064ec1d9fa0945.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MedV4D/Imaging/cuda/detail/WatershedTransformation.cuh"
#include "MedV4D/Imaging/ImageRegion.h"
__device__ uint64 foundZero;
__global__ void
isNonzeroKernel( Buffer3D< uint32 > buffer, int3 blockResolution )
{
int3 blockCoordinates = GetBlockCoordinates( blockResolution, __mul24(blockIdx.y, gridDim.x) + blockIdx.x );
int3 blockOrigin = GetBlockOrigin( blockDim, blockCoordinates );
int3 coordinates = blockOrigin + threadIdx;
int3 size = toInt3( buffer.mSize );
bool projected = ProjectionToInterval( coordinates, make_int3(0,0,0), size );
int idx = IdxFromCoordStrides( coordinates, buffer.mStrides );
if ( !projected ) {
if( buffer.mData[idx] == 0 ) {
atomicAdd( &foundZero, 1 );
}
}
}
uint64
isNonzero( Buffer3D< uint32 > buffer )
{
CheckCudaErrorState( "Before isNonzero()" );
uint64 foundZero = 0;
hipMemcpyToSymbol( "foundZero", &(foundZero = 0), sizeof(uint64), 0, hipMemcpyHostToDevice );
dim3 blockSize( 8, 8, 8 );
int3 blockResolution = GetBlockResolution( buffer.mSize, blockSize, make_int3(0,0,0) );
dim3 gridSize( blockResolution.x * blockResolution.y, blockResolution.z, 1 );
hipLaunchKernelGGL(( isNonzeroKernel), dim3(gridSize), dim3(blockSize) , 0, 0, buffer, blockResolution );
hipDeviceSynchronize();
hipMemcpyFromSymbol( &foundZero, "foundZero", sizeof(uint64), 0, hipMemcpyDeviceToHost );
CheckCudaErrorState( "After isNonzero()" );
return foundZero;
}
template< typename RegionType >
void
RegionBorderDetection3D( RegionType input, M4D::Imaging::MaskRegion3D output )
{
typedef typename RegionType::ElementType TElement;
typedef Buffer3D< TElement > Buffer;
typedef Buffer3D< uint8 > MaskBuffer;
Buffer inBuffer = CudaBuffer3DFromImageRegionCopy( input );
MaskBuffer outBuffer = CudaBuffer3DFromImageRegion( output );
RegionBorderDetection3DFtor< TElement > filter;
//int3 radius = filter.radius;
dim3 blockSize( 8, 8, 8 );
int3 blockResolution = GetBlockResolution( inBuffer.mSize, blockSize, make_int3(0,0,0) );
dim3 gridSize( blockResolution.x * blockResolution.y, blockResolution.z, 1 );
M4D::Common::Clock clock;
CheckCudaErrorState( "Before kernel execution" );
hipLaunchKernelGGL(( FilterKernel3D< TElement, uint8, RegionBorderDetection3DFtor< TElement > >)
, dim3(gridSize), dim3(blockSize) , 0, 0,
inBuffer,
outBuffer,
blockResolution,
filter
);
hipDeviceSynchronize();
CheckCudaErrorState( "After kernel execution" );
D_PRINT( "Computations took " << clock.SecondsPassed() )
hipMemcpy(output.GetPointer(), outBuffer.mData, outBuffer.mLength * sizeof(uint8), hipMemcpyDeviceToHost );
CheckCudaErrorState( "Copy back" );
//hipFree( inBuffer.mData );
//hipFree( outBuffer.mData );
CheckCudaErrorState( "Free memory" );
}
template< typename TEType >
void
watershedTransformation3D( M4D::Imaging::ImageRegion< uint32, 3 > aLabeledMarkerRegions, M4D::Imaging::ImageRegion< TEType, 3 > aInput, M4D::Imaging::ImageRegion< uint32, 3 > aOutput )
{
D_PRINT( "Before " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
typedef typename TypeTraits< TEType >::SuperiorSignedType SignedElement;
//typedef typename TypeTraits< TEType >::SignedClosestType SignedElement;
//typedef typename TypeTraits< TEType >::SuperiorFloatType SignedElement;
int wshedUpdated = 1;
Buffer3D< uint32 > labeledRegionsBuffer = CudaBuffer3DFromImageRegionCopy( aLabeledMarkerRegions );
Buffer3D< TEType > inputBuffer = CudaBuffer3DFromImageRegionCopy( aInput );
Buffer3D< SignedElement > tmpBuffer = CudaPrepareBuffer<SignedElement>( aInput.GetSize() );
Buffer3D< uint32 > labeledRegionsBuffer2 = CudaBuffer3DFromImageRegion( aLabeledMarkerRegions );
Buffer3D< SignedElement > tmpBuffer2 = CudaPrepareBuffer<SignedElement>( aInput.GetSize() );
ASSERT( labeledRegionsBuffer.mStrides == labeledRegionsBuffer2.mStrides );
ASSERT( tmpBuffer.mStrides == tmpBuffer2.mStrides );
//int3 radius = make_int3( 1, 1, 1 );
D_PRINT( "After allocation in " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
dim3 blockSize1D( 512 );
dim3 gridSize1D( (inputBuffer.mLength + 64*blockSize1D.x - 1) / (64*blockSize1D.x) , 64 );
dim3 blockSize3D( 8, 8, 8 );
int3 blockResolution3D = GetBlockResolution( inputBuffer.mSize, blockSize3D, make_int3(0,0,0) );
dim3 gridSize3D( blockResolution3D.x * blockResolution3D.y, blockResolution3D.z, 1 );
M4D::Common::Clock clock;
D_PRINT( "InitWatershedBuffers()" );
D_PRINT( "TypeTraits<SignedElement>::Max = " << TypeTraits<SignedElement>::Max );
hipLaunchKernelGGL(( InitWatershedBuffers), dim3(gridSize1D), dim3(blockSize1D) , 0, 0, labeledRegionsBuffer, tmpBuffer, TypeTraits<SignedElement>::Max );
unsigned i = 0;
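// Flood until a full pass makes no update (device flag wshedUpdated stays 0)
// or a cap of 1000 iterations is reached; each pass reads from one label/tmp
// buffer pair and writes the other, then the pairs are swapped.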
while (wshedUpdated != 0 && i < 1000) {
hipMemcpyToSymbol( "wshedUpdated", &(wshedUpdated = 0), sizeof(int), 0, hipMemcpyHostToDevice );
/*WShedEvolution<<< gridSize3D, blockSize3D >>>(
inputBuffer,
labeledRegionsBuffer,
tmpBuffer,
blockResolution3D,
TypeTraits<SignedElement>::Max
);*/
hipLaunchKernelGGL(( WShedEvolution), dim3(gridSize3D), dim3(blockSize3D) , 0, 0,
inputBuffer,
labeledRegionsBuffer,
tmpBuffer,
labeledRegionsBuffer2,
tmpBuffer2,
blockResolution3D,
TypeTraits<SignedElement>::Max
);
using std::swap;
swap( labeledRegionsBuffer, labeledRegionsBuffer2 );
swap( tmpBuffer, tmpBuffer2 );
hipDeviceSynchronize();
hipMemcpyFromSymbol( &wshedUpdated, "wshedUpdated", sizeof(int), 0, hipMemcpyDeviceToHost );
++i;
}
D_PRINT( "wshedUpdated = " << wshedUpdated );
LOG( "WatershedTransformation3D computations took " << clock.SecondsPassed() << " and " << i << " iterations" )
LOG( "number of zero voxels = " << isNonzero( labeledRegionsBuffer ) );
hipMemcpy(aOutput.GetPointer(), labeledRegionsBuffer.mData, labeledRegionsBuffer.mLength * sizeof(uint32), hipMemcpyDeviceToHost );
//hipFree( labeledRegionsBuffer.mData );
//hipFree( inputBuffer.mData );
//hipFree( labeledRegionsBuffer2.mData );
//hipFree( tmpBuffer2.mData );
/*typename M4D::Imaging::Image< SignedElement, 3 >::Ptr tmpDebugImage = M4D::Imaging::ImageFactory::CreateEmptyImageFromExtents< SignedElement, 3 >( aLabeledMarkerRegions.GetMinimum(), aLabeledMarkerRegions.GetMaximum(), aLabeledMarkerRegions.GetElementExtents() );
hipMemcpy(tmpDebugImage->GetRegion().GetPointer(), tmpBuffer.mData, labeledRegionsBuffer.mLength * sizeof(SignedElement), hipMemcpyDeviceToHost );
M4D::Imaging::ImageFactory::DumpImage( "Intermediate.dump", *tmpDebugImage );
*/
//hipFree( tmpBuffer.mData );
D_PRINT( "After " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
}
#define DECLARE_TEMPLATE_INSTANCE template void watershedTransformation3D( M4D::Imaging::ImageRegion< uint32, 3 > aLabeledMarkerRegions, M4D::Imaging::ImageRegion< TTYPE, 3 > aInput, M4D::Imaging::ImageRegion< uint32, 3 > aOutput );
#include "MedV4D/Common/DeclareTemplateNumericInstances.h"
#define DECLARE_TEMPLATE_INSTANCE template void RegionBorderDetection3D( M4D::Imaging::ImageRegion< TTYPE, 3 > input, M4D::Imaging::MaskRegion3D output );
#include "MedV4D/Common/DeclareTemplateNumericInstances.h"
| d60c39f39e15f18bd333e96004064ec1d9fa0945.cu | #include "MedV4D/Imaging/cuda/detail/WatershedTransformation.cuh"
#include "MedV4D/Imaging/ImageRegion.h"
__device__ uint64 foundZero;
__global__ void
isNonzeroKernel( Buffer3D< uint32 > buffer, int3 blockResolution )
{
int3 blockCoordinates = GetBlockCoordinates( blockResolution, __mul24(blockIdx.y, gridDim.x) + blockIdx.x );
int3 blockOrigin = GetBlockOrigin( blockDim, blockCoordinates );
int3 coordinates = blockOrigin + threadIdx;
int3 size = toInt3( buffer.mSize );
bool projected = ProjectionToInterval( coordinates, make_int3(0,0,0), size );
int idx = IdxFromCoordStrides( coordinates, buffer.mStrides );
if ( !projected ) {
if( buffer.mData[idx] == 0 ) {
atomicAdd( &foundZero, 1 );
}
}
}
uint64
isNonzero( Buffer3D< uint32 > buffer )
{
CheckCudaErrorState( "Before isNonzero()" );
uint64 foundZero = 0;
cudaMemcpyToSymbol( "foundZero", &(foundZero = 0), sizeof(uint64), 0, cudaMemcpyHostToDevice );
dim3 blockSize( 8, 8, 8 );
int3 blockResolution = GetBlockResolution( buffer.mSize, blockSize, make_int3(0,0,0) );
dim3 gridSize( blockResolution.x * blockResolution.y, blockResolution.z, 1 );
isNonzeroKernel<<< gridSize, blockSize >>>( buffer, blockResolution );
cudaThreadSynchronize();
cudaMemcpyFromSymbol( &foundZero, "foundZero", sizeof(uint64), 0, cudaMemcpyDeviceToHost );
CheckCudaErrorState( "After isNonzero()" );
return foundZero;
}
template< typename RegionType >
void
RegionBorderDetection3D( RegionType input, M4D::Imaging::MaskRegion3D output )
{
typedef typename RegionType::ElementType TElement;
typedef Buffer3D< TElement > Buffer;
typedef Buffer3D< uint8 > MaskBuffer;
Buffer inBuffer = CudaBuffer3DFromImageRegionCopy( input );
MaskBuffer outBuffer = CudaBuffer3DFromImageRegion( output );
RegionBorderDetection3DFtor< TElement > filter;
//int3 radius = filter.radius;
dim3 blockSize( 8, 8, 8 );
int3 blockResolution = GetBlockResolution( inBuffer.mSize, blockSize, make_int3(0,0,0) );
dim3 gridSize( blockResolution.x * blockResolution.y, blockResolution.z, 1 );
M4D::Common::Clock clock;
CheckCudaErrorState( "Before kernel execution" );
FilterKernel3D< TElement, uint8, RegionBorderDetection3DFtor< TElement > >
<<< gridSize, blockSize >>>(
inBuffer,
outBuffer,
blockResolution,
filter
);
cudaThreadSynchronize();
CheckCudaErrorState( "After kernel execution" );
D_PRINT( "Computations took " << clock.SecondsPassed() )
cudaMemcpy(output.GetPointer(), outBuffer.mData, outBuffer.mLength * sizeof(uint8), cudaMemcpyDeviceToHost );
CheckCudaErrorState( "Copy back" );
//cudaFree( inBuffer.mData );
//cudaFree( outBuffer.mData );
CheckCudaErrorState( "Free memory" );
}
template< typename TEType >
void
watershedTransformation3D( M4D::Imaging::ImageRegion< uint32, 3 > aLabeledMarkerRegions, M4D::Imaging::ImageRegion< TEType, 3 > aInput, M4D::Imaging::ImageRegion< uint32, 3 > aOutput )
{
D_PRINT( "Before " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
typedef typename TypeTraits< TEType >::SuperiorSignedType SignedElement;
//typedef typename TypeTraits< TEType >::SignedClosestType SignedElement;
//typedef typename TypeTraits< TEType >::SuperiorFloatType SignedElement;
int wshedUpdated = 1;
Buffer3D< uint32 > labeledRegionsBuffer = CudaBuffer3DFromImageRegionCopy( aLabeledMarkerRegions );
Buffer3D< TEType > inputBuffer = CudaBuffer3DFromImageRegionCopy( aInput );
Buffer3D< SignedElement > tmpBuffer = CudaPrepareBuffer<SignedElement>( aInput.GetSize() );
Buffer3D< uint32 > labeledRegionsBuffer2 = CudaBuffer3DFromImageRegion( aLabeledMarkerRegions );
Buffer3D< SignedElement > tmpBuffer2 = CudaPrepareBuffer<SignedElement>( aInput.GetSize() );
ASSERT( labeledRegionsBuffer.mStrides == labeledRegionsBuffer2.mStrides );
ASSERT( tmpBuffer.mStrides == tmpBuffer2.mStrides );
//int3 radius = make_int3( 1, 1, 1 );
D_PRINT( "After allocation in " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
dim3 blockSize1D( 512 );
dim3 gridSize1D( (inputBuffer.mLength + 64*blockSize1D.x - 1) / (64*blockSize1D.x) , 64 );
dim3 blockSize3D( 8, 8, 8 );
int3 blockResolution3D = GetBlockResolution( inputBuffer.mSize, blockSize3D, make_int3(0,0,0) );
dim3 gridSize3D( blockResolution3D.x * blockResolution3D.y, blockResolution3D.z, 1 );
M4D::Common::Clock clock;
D_PRINT( "InitWatershedBuffers()" );
D_PRINT( "TypeTraits<SignedElement>::Max = " << TypeTraits<SignedElement>::Max );
InitWatershedBuffers<<< gridSize1D, blockSize1D >>>( labeledRegionsBuffer, tmpBuffer, TypeTraits<SignedElement>::Max );
unsigned i = 0;
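// Flood until a full pass makes no update (device flag wshedUpdated stays 0)
// or a cap of 1000 iterations is reached; each pass reads from one label/tmp
// buffer pair and writes the other, then the pairs are swapped.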
while (wshedUpdated != 0 && i < 1000) {
cudaMemcpyToSymbol( "wshedUpdated", &(wshedUpdated = 0), sizeof(int), 0, cudaMemcpyHostToDevice );
/*WShedEvolution<<< gridSize3D, blockSize3D >>>(
inputBuffer,
labeledRegionsBuffer,
tmpBuffer,
blockResolution3D,
TypeTraits<SignedElement>::Max
);*/
WShedEvolution<<< gridSize3D, blockSize3D >>>(
inputBuffer,
labeledRegionsBuffer,
tmpBuffer,
labeledRegionsBuffer2,
tmpBuffer2,
blockResolution3D,
TypeTraits<SignedElement>::Max
);
using std::swap;
swap( labeledRegionsBuffer, labeledRegionsBuffer2 );
swap( tmpBuffer, tmpBuffer2 );
cudaThreadSynchronize();
cudaMemcpyFromSymbol( &wshedUpdated, "wshedUpdated", sizeof(int), 0, cudaMemcpyDeviceToHost );
++i;
}
D_PRINT( "wshedUpdated = " << wshedUpdated );
LOG( "WatershedTransformation3D computations took " << clock.SecondsPassed() << " and " << i << " iterations" )
LOG( "number of zero voxels = " << isNonzero( labeledRegionsBuffer ) );
cudaMemcpy(aOutput.GetPointer(), labeledRegionsBuffer.mData, labeledRegionsBuffer.mLength * sizeof(uint32), cudaMemcpyDeviceToHost );
//cudaFree( labeledRegionsBuffer.mData );
//cudaFree( inputBuffer.mData );
//cudaFree( labeledRegionsBuffer2.mData );
//cudaFree( tmpBuffer2.mData );
/*typename M4D::Imaging::Image< SignedElement, 3 >::Ptr tmpDebugImage = M4D::Imaging::ImageFactory::CreateEmptyImageFromExtents< SignedElement, 3 >( aLabeledMarkerRegions.GetMinimum(), aLabeledMarkerRegions.GetMaximum(), aLabeledMarkerRegions.GetElementExtents() );
cudaMemcpy(tmpDebugImage->GetRegion().GetPointer(), tmpBuffer.mData, labeledRegionsBuffer.mLength * sizeof(SignedElement), cudaMemcpyDeviceToHost );
M4D::Imaging::ImageFactory::DumpImage( "Intermediate.dump", *tmpDebugImage );
*/
//cudaFree( tmpBuffer.mData );
D_PRINT( "After " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
}
#define DECLARE_TEMPLATE_INSTANCE template void watershedTransformation3D( M4D::Imaging::ImageRegion< uint32, 3 > aLabeledMarkerRegions, M4D::Imaging::ImageRegion< TTYPE, 3 > aInput, M4D::Imaging::ImageRegion< uint32, 3 > aOutput );
#include "MedV4D/Common/DeclareTemplateNumericInstances.h"
#define DECLARE_TEMPLATE_INSTANCE template void RegionBorderDetection3D( M4D::Imaging::ImageRegion< TTYPE, 3 > input, M4D::Imaging::MaskRegion3D output );
#include "MedV4D/Common/DeclareTemplateNumericInstances.h"
|
8b98dbf5ec15dc533437fc9d39a07954e7d67ba5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bitmask/valid_if.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column.hpp>
#include <cudf/functions.h>
#include <cudf/null_mask.hpp>
#include <utilities/error_utils.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <thrust/for_each.h>
namespace cudf {
// Create a strings-type column from array of pointer/size pairs
std::unique_ptr<column> make_strings_column(
const rmm::device_vector<thrust::pair<const char*,size_type>>& strings,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type num_strings = strings.size();
// maybe a separate factory for creating null strings-column
CUDF_EXPECTS(num_strings > 0, "must specify at least one pair");
auto execpol = rmm::exec_policy(stream);
auto d_strings = strings.data().get();
// check total size is not too large for cudf column
size_t bytes = thrust::transform_reduce( execpol->on(stream),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(num_strings),
[d_strings] __device__ (size_t idx) {
auto item = d_strings[idx];
return (item.first!=nullptr) ? item.second : 0;
},
0, thrust::plus<size_t>());
CUDF_EXPECTS( bytes < std::numeric_limits<size_type>::max(), "total size of strings is too large for cudf column" );
// build offsets column -- last entry is the total size
auto offsets_column = make_numeric_column( data_type{INT32}, num_strings+1, mask_state::UNALLOCATED, stream, mr );
auto offsets_view = offsets_column->mutable_view();
auto d_offsets = offsets_view.data<int32_t>();
// Using inclusive-scan to compute last entry which is the total size.
// Exclusive-scan is possible but will not compute that last entry.
// Rather than manually computing the final offset using values in device memory,
// we use inclusive-scan on a shifted output (d_offsets+1) and then set the first
// zero offset manually.
thrust::transform_inclusive_scan( execpol->on(stream),
thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(num_strings),
d_offsets+1, // fills in offsets entries [1,num_strings]
[d_strings] __device__ (size_type idx) {
thrust::pair<const char*,size_type> item = d_strings[idx];
return ( item.first!=nullptr ? static_cast<int32_t>(item.second) : 0 );
},
thrust::plus<int32_t>() );
// set the first offset to 0
CUDA_TRY(hipMemsetAsync( d_offsets, 0, sizeof(*d_offsets), stream));
// create null mask
auto valid_mask = valid_if( static_cast<const bit_mask_t*>(nullptr),
[d_strings] __device__ (size_type idx) { return d_strings[idx].first!=nullptr; },
num_strings, stream );
auto null_count = valid_mask.second;
rmm::device_buffer null_mask(valid_mask.first, gdf_valid_allocation_size(num_strings),
stream, mr);
RMM_TRY( RMM_FREE(valid_mask.first,stream) ); // TODO valid_if to return device_buffer in future
// if we have all nulls, a null chars column is allowed
// if all non-null strings are empty strings, we need a non-null chars column
// - in this case we set the bytes to 1 to create a minimal one-byte chars column
if( (bytes==0) && (null_count < num_strings) )
bytes = 1; // all entries are empty strings
// build chars column
auto chars_column = make_numeric_column( data_type{INT8}, bytes, mask_state::UNALLOCATED, stream, mr );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), num_strings,
[d_strings, d_offsets, d_chars] __device__(size_type idx){
// place individual strings
auto item = d_strings[idx];
if( item.first!=nullptr )
memcpy(d_chars + d_offsets[idx], item.first, item.second );
});
// build children vector
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(chars_column));
// no data-ptr with num_strings elements plus children
return std::make_unique<column>(
data_type{STRING}, num_strings, rmm::device_buffer{0,stream,mr},
null_mask, null_count,
std::move(children));
}
// Create a strings-type column from array of chars and array of offsets.
std::unique_ptr<column> make_strings_column(
const rmm::device_vector<char>& strings,
const rmm::device_vector<size_type>& offsets,
const rmm::device_vector<bitmask_type>& valid_mask,
size_type null_count,
hipStream_t stream,
rmm::mr::device_memory_resource* mr )
{
size_type num_strings = offsets.size()-1;
CUDF_EXPECTS( num_strings > 0, "strings count must be greater than 0");
CUDF_EXPECTS( null_count < num_strings, "null strings column not yet supported");
if( null_count > 0 ) {
CUDF_EXPECTS( !valid_mask.empty(), "Cannot have null elements without a null mask." );
}
auto execpol = rmm::exec_policy(stream);
size_type bytes = offsets.back() - offsets[0];
CUDF_EXPECTS( bytes >=0, "invalid offsets vector");
// build offsets column -- this is the number of strings + 1
auto offsets_column = make_numeric_column( data_type{INT32}, num_strings+1, mask_state::UNALLOCATED, stream, mr );
auto offsets_view = offsets_column->mutable_view();
CUDA_TRY(hipMemcpyAsync( offsets_view.data<int32_t>(), offsets.data().get(),
(num_strings+1)*sizeof(int32_t),
hipMemcpyDeviceToDevice, stream ));
// build null bitmask
rmm::device_buffer null_mask;
if( null_count )
null_mask = rmm::device_buffer(valid_mask.data().get(),
gdf_valid_allocation_size(num_strings),
stream, mr);
// build chars column
auto chars_column = make_numeric_column( data_type{INT8}, bytes, mask_state::UNALLOCATED, stream, mr );
auto chars_view = chars_column->mutable_view();
CUDA_TRY(hipMemcpyAsync( chars_view.data<char>(), strings.data().get(), bytes,
hipMemcpyDeviceToDevice, stream ));
// build children vector
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(chars_column));
//
return std::make_unique<column>(
data_type{STRING}, num_strings, rmm::device_buffer{0,stream,mr},
null_mask, null_count,
std::move(children));
}
} // namespace cudf
| 8b98dbf5ec15dc533437fc9d39a07954e7d67ba5.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bitmask/valid_if.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column.hpp>
#include <cudf/functions.h>
#include <cudf/null_mask.hpp>
#include <utilities/error_utils.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <thrust/for_each.h>
namespace cudf {
// Create a strings-type column from array of pointer/size pairs
std::unique_ptr<column> make_strings_column(
const rmm::device_vector<thrust::pair<const char*,size_type>>& strings,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type num_strings = strings.size();
// maybe a separate factory for creating null strings-column
CUDF_EXPECTS(num_strings > 0, "must specify at least one pair");
auto execpol = rmm::exec_policy(stream);
auto d_strings = strings.data().get();
// check total size is not too large for cudf column
size_t bytes = thrust::transform_reduce( execpol->on(stream),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(num_strings),
[d_strings] __device__ (size_t idx) {
auto item = d_strings[idx];
return (item.first!=nullptr) ? item.second : 0;
},
0, thrust::plus<size_t>());
CUDF_EXPECTS( bytes < std::numeric_limits<size_type>::max(), "total size of strings is too large for cudf column" );
// build offsets column -- last entry is the total size
auto offsets_column = make_numeric_column( data_type{INT32}, num_strings+1, mask_state::UNALLOCATED, stream, mr );
auto offsets_view = offsets_column->mutable_view();
auto d_offsets = offsets_view.data<int32_t>();
// Using inclusive-scan to compute last entry which is the total size.
// Exclusive-scan is possible but will not compute that last entry.
// Rather than manually computing the final offset using values in device memory,
// we use inclusive-scan on a shifted output (d_offsets+1) and then set the first
// zero offset manually.
thrust::transform_inclusive_scan( execpol->on(stream),
thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(num_strings),
d_offsets+1, // fills in offsets entries [1,num_strings]
[d_strings] __device__ (size_type idx) {
thrust::pair<const char*,size_type> item = d_strings[idx];
return ( item.first!=nullptr ? static_cast<int32_t>(item.second) : 0 );
},
thrust::plus<int32_t>() );
// set the first offset to 0
CUDA_TRY(cudaMemsetAsync( d_offsets, 0, sizeof(*d_offsets), stream));
// create null mask
auto valid_mask = valid_if( static_cast<const bit_mask_t*>(nullptr),
[d_strings] __device__ (size_type idx) { return d_strings[idx].first!=nullptr; },
num_strings, stream );
auto null_count = valid_mask.second;
rmm::device_buffer null_mask(valid_mask.first, gdf_valid_allocation_size(num_strings),
stream, mr);
RMM_TRY( RMM_FREE(valid_mask.first,stream) ); // TODO valid_if to return device_buffer in future
// if we have all nulls, a null chars column is allowed
// if all non-null strings are empty strings, we need a non-null chars column
// - in this case we set the bytes to 1 to create a minimal one-byte chars column
if( (bytes==0) && (null_count < num_strings) )
bytes = 1; // all entries are empty strings
// build chars column
auto chars_column = make_numeric_column( data_type{INT8}, bytes, mask_state::UNALLOCATED, stream, mr );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), num_strings,
[d_strings, d_offsets, d_chars] __device__(size_type idx){
// place individual strings
auto item = d_strings[idx];
if( item.first!=nullptr )
memcpy(d_chars + d_offsets[idx], item.first, item.second );
});
// build children vector
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(chars_column));
// no data-ptr with num_strings elements plus children
return std::make_unique<column>(
data_type{STRING}, num_strings, rmm::device_buffer{0,stream,mr},
null_mask, null_count,
std::move(children));
}
// Create a strings-type column from array of chars and array of offsets.
std::unique_ptr<column> make_strings_column(
const rmm::device_vector<char>& strings,
const rmm::device_vector<size_type>& offsets,
const rmm::device_vector<bitmask_type>& valid_mask,
size_type null_count,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr )
{
size_type num_strings = offsets.size()-1;
CUDF_EXPECTS( num_strings > 0, "strings count must be greater than 0");
CUDF_EXPECTS( null_count < num_strings, "null strings column not yet supported");
if( null_count > 0 ) {
CUDF_EXPECTS( !valid_mask.empty(), "Cannot have null elements without a null mask." );
}
auto execpol = rmm::exec_policy(stream);
size_type bytes = offsets.back() - offsets[0];
CUDF_EXPECTS( bytes >=0, "invalid offsets vector");
// build offsets column -- this is the number of strings + 1
auto offsets_column = make_numeric_column( data_type{INT32}, num_strings+1, mask_state::UNALLOCATED, stream, mr );
auto offsets_view = offsets_column->mutable_view();
CUDA_TRY(cudaMemcpyAsync( offsets_view.data<int32_t>(), offsets.data().get(),
(num_strings+1)*sizeof(int32_t),
cudaMemcpyDeviceToDevice, stream ));
// build null bitmask
rmm::device_buffer null_mask;
if( null_count )
null_mask = rmm::device_buffer(valid_mask.data().get(),
gdf_valid_allocation_size(num_strings),
stream, mr);
// build chars column
auto chars_column = make_numeric_column( data_type{INT8}, bytes, mask_state::UNALLOCATED, stream, mr );
auto chars_view = chars_column->mutable_view();
CUDA_TRY(cudaMemcpyAsync( chars_view.data<char>(), strings.data().get(), bytes,
cudaMemcpyDeviceToDevice, stream ));
// build children vector
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(chars_column));
//
return std::make_unique<column>(
data_type{STRING}, num_strings, rmm::device_buffer{0,stream,mr},
null_mask, null_count,
std::move(children));
}
} // namespace cudf
|
8d02f9d19e2f29e7a1dce21cecf97c899197877a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) OpenMMLab. All rights reserved
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
#include <cmath>
#include <vector>
#include "common_cuda_helper.hpp"
#include "scaled_dot_product_attention_kernel.hpp"
#include "trt_plugin_helper.hpp"
template <typename scalar_t>
hipblasStatus_t cublasgemmStridedBatchedWrap(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const scalar_t* alpha, const scalar_t* A, int lda,
long long int strideA, const scalar_t* B, int ldb,
long long int strideB, const scalar_t* beta,
scalar_t* C, int ldc, long long int strideC,
int batchCount);
template <>
hipblasStatus_t cublasgemmStridedBatchedWrap<float>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const float* alpha, const float* A, int lda,
long long int strideA, const float* B, int ldb,
long long int strideB, const float* beta,
float* C, int ldc, long long int strideC,
int batchCount) {
return hipblasSgemmStridedBatched(handle, transa, transb, m, n, k, alpha, A, lda, strideA, B, ldb,
strideB, beta, C, ldc, strideC, batchCount);
}
template <>
hipblasStatus_t cublasgemmStridedBatchedWrap<__half>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const __half* alpha, const __half* A, int lda,
long long int strideA, const __half* B, int ldb,
long long int strideB, const __half* beta,
__half* C, int ldc, long long int strideC,
int batchCount) {
return hipblasHgemmStridedBatched(handle, transa, transb, m, n, k, alpha, A, lda, strideA, B, ldb,
strideB, beta, C, ldc, strideC, batchCount);
}
template <typename scalar_t>
void dot_product_attention_impl(const scalar_t* query, const scalar_t* key, const scalar_t* value,
const scalar_t* mask, scalar_t* attn, scalar_t* output, int B,
int Nt, int Ns, int E, const int* mask_dims,
cudnnTensorDescriptor_t& x_desc, cudnnTensorDescriptor_t& y_desc,
cudnnTensorDescriptor_t& mask_desc, cudnnDataType_t cudnn_dtype,
hipStream_t stream, hipblasHandle_t cublas_handle,
cudnnHandle_t cudnn_handle) {
{
// Q @ K
const int m = Ns;
const int n = Nt;
const int k = E;
const auto alpha = scalar_t(1.0f / sqrt(float(E)));
const auto beta = scalar_t(0);
cublasgemmStridedBatchedWrap(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, key, k,
Ns * E, query, k, Nt * E, &beta, attn, m, Nt * Ns, B);
}
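  // Note on the call above (added explanatory comment, not part of the original file):
  // cuBLAS/hipBLAS assume column-major storage while these tensors are row-major.
  // A row-major [Ns, E] key batch is seen as a column-major E x Ns matrix, so
  // op(A)=T gives Ns x E, op(B)=N gives E x Nt from the query, and C is Ns x Nt
  // column-major, i.e. attn stored row-major as [Nt, Ns]:
  //   attn[b, t, s] = (1/sqrt(E)) * sum_e Q[b, t, e] * K[b, s, e]
  // The same row-major/column-major trick is used for the attn @ V call below.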
if (mask_dims != nullptr && mask_dims[0] != 0) {
const auto alpha = scalar_t(1);
const auto beta = scalar_t(1);
cudnnSetTensor4dDescriptor(mask_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, 1, mask_dims[0],
mask_dims[1], mask_dims[2]);
cudnnSetTensor4dDescriptor(x_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, 1, B, Nt, Ns);
cudnnAddTensor(cudnn_handle, &alpha, mask_desc, mask, &beta, x_desc, attn);
}
{
// softmax attention
const auto alpha = scalar_t(1);
const auto beta = scalar_t(0);
cudnnSetTensor4dDescriptor(x_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, B * Nt, Ns, 1, 1);
cudnnSetTensor4dDescriptor(y_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, B * Nt, Ns, 1, 1);
cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha,
x_desc, attn, &beta, y_desc, attn);
}
{
// attn @ v
const int m = E;
const int n = Nt;
const int k = Ns;
const auto alpha = scalar_t(1);
const auto beta = scalar_t(0);
cublasgemmStridedBatchedWrap(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, value, m,
Ns * E, (const scalar_t*)(attn), k, Ns * Nt, &beta, output, m,
Nt * E, B);
}
}
template void dot_product_attention_impl<float>(
const float* query, const float* key, const float* value, const float* mask, float* attn,
float* output, int B, int Nt, int Ns, int E, const int* mask_dims,
cudnnTensorDescriptor_t& x_desc, cudnnTensorDescriptor_t& y_desc,
cudnnTensorDescriptor_t& mask_desc, cudnnDataType_t cudnn_dtype, hipStream_t stream,
hipblasHandle_t cublas_handle, cudnnHandle_t cudnn_handle);
| 8d02f9d19e2f29e7a1dce21cecf97c899197877a.cu | // Copyright (c) OpenMMLab. All rights reserved
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
#include <cmath>
#include <vector>
#include "common_cuda_helper.hpp"
#include "scaled_dot_product_attention_kernel.hpp"
#include "trt_plugin_helper.hpp"
template <typename scalar_t>
cublasStatus_t cublasgemmStridedBatchedWrap(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const scalar_t* alpha, const scalar_t* A, int lda,
long long int strideA, const scalar_t* B, int ldb,
long long int strideB, const scalar_t* beta,
scalar_t* C, int ldc, long long int strideC,
int batchCount);
template <>
cublasStatus_t cublasgemmStridedBatchedWrap<float>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const float* alpha, const float* A, int lda,
long long int strideA, const float* B, int ldb,
long long int strideB, const float* beta,
float* C, int ldc, long long int strideC,
int batchCount) {
return cublasSgemmStridedBatched(handle, transa, transb, m, n, k, alpha, A, lda, strideA, B, ldb,
strideB, beta, C, ldc, strideC, batchCount);
}
template <>
cublasStatus_t cublasgemmStridedBatchedWrap<__half>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const __half* alpha, const __half* A, int lda,
long long int strideA, const __half* B, int ldb,
long long int strideB, const __half* beta,
__half* C, int ldc, long long int strideC,
int batchCount) {
return cublasHgemmStridedBatched(handle, transa, transb, m, n, k, alpha, A, lda, strideA, B, ldb,
strideB, beta, C, ldc, strideC, batchCount);
}
template <typename scalar_t>
void dot_product_attention_impl(const scalar_t* query, const scalar_t* key, const scalar_t* value,
const scalar_t* mask, scalar_t* attn, scalar_t* output, int B,
int Nt, int Ns, int E, const int* mask_dims,
cudnnTensorDescriptor_t& x_desc, cudnnTensorDescriptor_t& y_desc,
cudnnTensorDescriptor_t& mask_desc, cudnnDataType_t cudnn_dtype,
cudaStream_t stream, cublasHandle_t cublas_handle,
cudnnHandle_t cudnn_handle) {
{
// Q @ K
const int m = Ns;
const int n = Nt;
const int k = E;
const auto alpha = scalar_t(1.0f / sqrt(float(E)));
const auto beta = scalar_t(0);
cublasgemmStridedBatchedWrap(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, key, k,
Ns * E, query, k, Nt * E, &beta, attn, m, Nt * Ns, B);
}
if (mask_dims != nullptr && mask_dims[0] != 0) {
const auto alpha = scalar_t(1);
const auto beta = scalar_t(1);
cudnnSetTensor4dDescriptor(mask_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, 1, mask_dims[0],
mask_dims[1], mask_dims[2]);
cudnnSetTensor4dDescriptor(x_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, 1, B, Nt, Ns);
cudnnAddTensor(cudnn_handle, &alpha, mask_desc, mask, &beta, x_desc, attn);
}
{
// softmax attention
const auto alpha = scalar_t(1);
const auto beta = scalar_t(0);
cudnnSetTensor4dDescriptor(x_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, B * Nt, Ns, 1, 1);
cudnnSetTensor4dDescriptor(y_desc, CUDNN_TENSOR_NCHW, cudnn_dtype, B * Nt, Ns, 1, 1);
cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha,
x_desc, attn, &beta, y_desc, attn);
}
{
// attn @ v
const int m = E;
const int n = Nt;
const int k = Ns;
const auto alpha = scalar_t(1);
const auto beta = scalar_t(0);
cublasgemmStridedBatchedWrap(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, value, m,
Ns * E, (const scalar_t*)(attn), k, Ns * Nt, &beta, output, m,
Nt * E, B);
}
}
template void dot_product_attention_impl<float>(
const float* query, const float* key, const float* value, const float* mask, float* attn,
float* output, int B, int Nt, int Ns, int E, const int* mask_dims,
cudnnTensorDescriptor_t& x_desc, cudnnTensorDescriptor_t& y_desc,
cudnnTensorDescriptor_t& mask_desc, cudnnDataType_t cudnn_dtype, cudaStream_t stream,
cublasHandle_t cublas_handle, cudnnHandle_t cudnn_handle);
|
81187e23a115a2d409d82eb250a980c3d490193e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FbpClass_Agent.cuh"
#include <stdio.h>
#include "stdafx.h"
#define PI 3.1415926536f
__global__ void InitDistance(float *distance_array, const float distance, const int V)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < V)
{
distance_array[tid] = distance;
}
}
__global__ void InitU(float* u, const int N, const float du, const float offcenter)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < N)
{
u[tid] = (tid - (N - 1) / 2.0f) * du + offcenter;
}
}
__global__ void InitBeta(float* beta, const int V, const float rotation, const float totalScanAngle)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < V)
{
beta[tid] = (totalScanAngle / V * tid + rotation) * PI / 180;
}
}
__global__ void InitReconKernel_Hamming(float* reconKernel, const int N, const float du, const float t)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 2 * N - 1)
{
// the center element index is N-1
int n = tid - (N - 1);
// ramp part
if (n == 0)
reconKernel[tid] = t / (4 * du*du);
else if (n % 2 == 0)
reconKernel[tid] = 0;
else
reconKernel[tid] = -t / (n*n * PI*PI * du*du);
// cosine part
int sgn = n % 2 == 0 ? 1 : -1;
reconKernel[tid] += (1 - t)* (sgn / (2 * PI*du*du) * (1.0f / (1 + 2 * n) + 1.0f / (1 - 2 * n))
- 1 / (PI*PI*du*du) * (1.0f / (1 + 2 * n) / (1 + 2 * n) + 1.0f / (1 - 2 * n) / (1 - 2 * n)));
}
}
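// Explanatory note (added, not part of the original file): the kernel above is the blend
//   h[n] = t * h_ramp[n] + (1 - t) * h_cosine[n]
// where h_ramp is the standard discrete ramp (Ram-Lak) filter:
//   h_ramp[0] = 1 / (4*du^2)
//   h_ramp[n] = 0                        for even n != 0
//   h_ramp[n] = -1 / (n^2 * pi^2 * du^2) for odd n
// Quick numeric check with du = 1 and t = 1: h[0] = 0.25, h[+-1] ~= -0.1013, h[+-2] = 0,
// which matches the branches above.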
__global__ void InitReconKernel_Delta(float* reconKernel, const int N, const float du, const float t)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 2 * N - 1)
{
// the center element index is N-1
int n = tid - (N - 1);
if (n == 0)
reconKernel[tid] = t;
else
reconKernel[tid] = 0;
}
}
//initialize a Gaussian kernel
//This kernel will be used along with the ramp kernel
//delta is in number of pixels, which is the standard deviation of the gaussian
//This kernel is normalized
__global__ void InitReconKernel_GaussianApodized(float* reconKernel, const int N, const float du, const float delta)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 1)
{
// the center element index is N-1
float temp_sum = 0;
for (int i = 0; i < 2 * N - 1; i++)
{
int n = i - (N - 1);
reconKernel[i] = exp( - float(n) * float(n) / 2.0 / delta / delta);
temp_sum = temp_sum + reconKernel[i];
}
for (int i = 0; i < 2 * N - 1; i++)
{
reconKernel[i] = reconKernel[i] / temp_sum / du;
}
}
}
__global__ void InitReconKernel_Quadratic(float* reconKernel, const int N, const float du, const int paramNum, const float p1, const float p2, const float p3)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < 2 * N - 1)
{
float a, b, c;
float kn = 1 / (2 * du);
if (paramNum == 2)
{
// p1 = t, p2 = h, p3 is ignored
a = (p2 - 1) / (kn*kn * (1 - 2 * p1));
b = -2 * a*p1*kn;
c = 1.0f;
}
else
{
a = p1;
b = p2;
c = p3;
}
reconKernel[idx] = 0.0f;
float du2 = du * du;
float du3 = du2 * du;
float du4 = du3 * du;
int n = idx - (N - 1);
if (n == 0)
{
// H3(x)
reconKernel[idx] += a / 32 / du4;
// H2(x)
reconKernel[idx] += b / 12 / du3;
// H1(x)
reconKernel[idx] += c / 4 / du2;
}
else if (n % 2 == 0)
{
// H3(x)
reconKernel[idx] += a * 3 / (8 * n*n * PI*PI * du4);
// H2(x)
reconKernel[idx] += b / (2 * n*n * PI*PI * du3);
// H1(x)
// do nothing, H1(even) is zero
}
else
{
// H3(x)
reconKernel[idx] += a * 3 / (8 * n*n * PI*PI * du4) * (4 / (n*n*PI*PI) - 1);
// H2(x)
reconKernel[idx] += -b / (2 * n*n * PI*PI * du3);
// H1(x)
reconKernel[idx] += -c / (n*n * PI*PI * du2);
}
}
}
__global__ void InitReconKernel_Polynomial(float* reconKernel, const int N, const float du, const float p6, const float p5, const float p4, const float p3, const float p2, const float p1, const float p0)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < 2 * N - 1)
{
int n = idx - (N - 1);
reconKernel[idx] = 0.0f;
float kn = 1 / (2 * du);
float du2 = du * du;
float du3 = du2 * du;
float du4 = du3 * du;
if (n == 0)
{
// H7(x)
reconKernel[idx] += p6 * powf(kn, 8) / 4;
// H6(x)
reconKernel[idx] += p5 * powf(kn, 7) * 2 / 7;
// H5(x)
reconKernel[idx] += p4 * powf(kn, 6) / 3;
// H4(x)
reconKernel[idx] += p3 * powf(kn, 5) * 2 / 5;
// H3(x)
reconKernel[idx] += p2 * powf(kn, 4) / 2;
// H2(x)
reconKernel[idx] += p1 * 2 * kn*kn*kn / 3;
// H1(x)
reconKernel[idx] += p0 * kn*kn;
}
else if (n % 2 == 0)
{
// H7(x)
reconKernel[idx] += p6 * 7 * (360 - 30 * n*n*PI*PI + powf(n*PI, 4)) / (128 * du2* powf(du*n*PI, 6));
// H6(x)
reconKernel[idx] += p5 * 3 * (120 - 20 * n*n*PI*PI + powf(n*PI, 4)) / (32 * du*powf(du*n*PI, 6));
// H5(x)
reconKernel[idx] += p4 * 5 * (n*n*PI*PI - 12) / (32 * du2 *powf(du*n*PI, 4));
// H4(x)
reconKernel[idx] += p3 * (n*n*PI*PI - 6) / (4 * du * powf(du*n*PI, 4));
// H3(x)
reconKernel[idx] += p2 * 3 / (8 * du4 * n*n * PI*PI);
// H2(x)
reconKernel[idx] += p1 / (2 * n*n *PI*PI * du3);
// H1(x)
// do nothing, H1(even) is zero
}
else
{
// H7(x)
reconKernel[idx] += p6 * 7 * (1440 - 360 * n*n*PI*PI + 30 * powf(n*PI, 4) - powf(n*PI, 6)) / (128 * powf(du*n*PI, 8));
// H6(x)
reconKernel[idx] += -p5 * 3 * (120 - 20 * n*n*PI*PI + powf(n*PI, 4)) / (32 * du*powf(du*n*PI, 6));
// H5(x)
reconKernel[idx] += -p4 * 5 * (48 - 12 * n*n*PI*PI + powf(n*PI, 4)) / (32 * powf(du*n*PI, 6));
// H4(x)
reconKernel[idx] += p3 * (6 - n * n*PI*PI) / (4 * du * powf(du*n*PI, 4));
// H3(x)
reconKernel[idx] += p2 * (4 - n * n*PI*PI) * 3 / (8 * powf(du*n*PI, 4));
// H2(x)
reconKernel[idx] += -p1 / (2 * n*n *PI*PI * du3);
// H1(x)
reconKernel[idx] += -p0 / (n*n *PI*PI * du2);
}
}
}
__global__ void InitReconKernel_Hilbert(float* reconKernel, const int N, const float du, const float t)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 2 * N - 1)
{
int n = tid - (N - 1);
if (n % 2 == 0)
reconKernel[tid] = 0;
else
{
reconKernel[tid] = 1 / (PI * PI * n * du);
if (t < 0)
reconKernel[tid] = -reconKernel[tid];
}
}
}
// weight the sinogram data
// sgm: sinogram (width x height x slice)
// N: width
// H: height
// V: views
// S: slice
// sliceThickness: mm
// sliceOffcenter: mm
// sdd: source to detector distance
// totalScanAngle
__global__ void WeightSinogram_device(float* sgm, const float* u, const int N, const int H, const int V, \
const int S, const float sliceThickness, const float sliceOffcenter, float* sdd_array, float totalScanAngle, bool shortScan, float *beta_array, float* offcenter_array)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
float offcenter_bias = offcenter_array[row] - offcenter_array[0];
float u_actual = u[col] + offcenter_bias;//actual u value due to non uniform offcenter
float sdd = sdd_array[row];
for (int i = 0; i < S; i++)
{
float v = sliceThickness * (i - (float(S) / 2.0f + 0.5)) + sliceOffcenter;
sgm[row*N + col + i * N*H] *= sdd * sdd / sqrtf((u_actual)*(u_actual)+sdd * sdd + v * v);
//the loop is to include all the slices since there may be more than one slice
}
if (shortScan)
{
//this beta is different from the beta_array
//To calculate the Parker weighting, beta should begin at zero degrees
//while the betaArray includes the start rotation angle
//adding abs function to deal with the case when totalScanAngle is negative
float beta = abs(beta_array[row] - beta_array[0]);
float rotation_direction = abs(totalScanAngle) / (totalScanAngle);
float gamma = atan(u_actual / sdd) * rotation_direction;
//float beta = abs(totalScanAngle/ 180.0f * PI ) / float(V) * float(row) ;
//float gamma = abs(totalScanAngle) / totalScanAngle * atan(u[col] / sdd);
float gamma_max = abs(totalScanAngle) * PI / 180.0f - PI;
//calculation of the parker weighting
float weighting = 0;
if (beta >= 0 && beta < gamma_max - 2 * gamma)
{
weighting = sin(PI / 2 * beta / (gamma_max - 2 * gamma));
weighting = weighting * weighting;
}
else if (beta >= gamma_max - 2 * gamma && beta < PI - 2 * gamma)
{
weighting = 1;
}
else if (beta >= PI - 2 * gamma && beta <= PI + gamma_max)
{
weighting = sin(PI / 2 * (PI + gamma_max - beta) / (gamma_max + 2 * gamma));
weighting = weighting * weighting;
}
else
{
//printf("ERROR!");
}
for (int i = 0; i < S; i++)
{
sgm[row*N + col + i * N*H] *= weighting;
}
}
else
{
;
}
}
}
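// Explanatory note (added, not part of the original file): with gamma_max defined as
// (total scan angle in radians) - pi, the piecewise weighting above is the standard
// Parker weight for a short scan over pi plus the fan angle:
//   w = sin^2( (pi/2) * beta / (gamma_max - 2*gamma) )                      for 0 <= beta < gamma_max - 2*gamma
//   w = 1                                                                    for gamma_max - 2*gamma <= beta < pi - 2*gamma
//   w = sin^2( (pi/2) * (pi + gamma_max - beta) / (gamma_max + 2*gamma) )    for pi - 2*gamma <= beta <= pi + gamma_max
// where gamma = atan(u/sdd) is the fan angle of the ray and beta is measured from the
// first view; summed over conjugate rays the weights add up to 1.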
// weight the sinogram data of Hilbert kernel (for phase contrast imaging)
// sgm: sinogram (width x height x slice)
// N: width
// V: height (views)
// S: slice
// sdd: source to detector distance
__global__ void WeightSinogramHilbert_device(float* sgm, const float* u, const int N, const int V, const int S, float sdd)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int i = 0; i < S; i++)
{
sgm[row*N + col + i * N*V] *= sqrtf(u[col] * u[col] + sdd * sdd);
}
}
}
// weight the sinogram data of Hilbert kernel (for phase contrast imaging) along angle direction (temporary test)
// sgm: sinogram (width x height x slice)
// N: width
// V: height (views)
// S: slice
// sdd: source to detector distance
__global__ void WeightSinogramHilbert_angle_device(float* sgm, const float* u, const int N, const int V, const int S, float sdd)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int i = 0; i < S; i++)
{
sgm[row*N + col + i * N*V] *= sdd / sqrtf(u[col] * u[col] + sdd * sdd);
}
}
}
// perform beam hardening correction of sinogram
// sgm: sinogram (width x height x slice)
// N: width
// V: height (views)
// S: slice
// p0-p9: correction parameters
__global__ void CorrectBeamHardening_device(float* sgm, const int N, const int V, const int S, float p0, float p1, float p2, float p3, float p4, float p5, float p6, float p7, float p8, float p9)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int i = 0; i < S; i++)
{
float oldSgm = sgm[row*N + col + i * N*V];
sgm[row*N + col + i * N*V] = p0 + p1 * powf(oldSgm, 1) + p2 * powf(oldSgm, 2) + p3 * powf(oldSgm, 3) + p4 * powf(oldSgm, 4) + p5 * powf(oldSgm, 5) + p6 * powf(oldSgm, 6) + p7 * powf(oldSgm, 7) + p8 * powf(oldSgm, 8) + p9 * powf(oldSgm, 9);
}
}
}
// convolve the sinogram data
// sgm_flt: sinogram data after convolving
// sgm: initial sinogram data
// reconKernel: reconstruction kernel
// N: sinogram width
// H: sinogram height
// V: number of views
// S: number of slices
// u: the position (coordinate) of each detector element
// du: detector element size [mm]
__global__ void ConvolveSinogram_device(float* sgm_flt, const float* sgm, float* reconKernel, const int N, const int H, const int V, const int S, const float* u, const float du)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int slice = 0; slice < S; slice++)
{
// temporary variable to speed up
float sgm_flt_local = 0;
for (int i = 0; i < N; i++)
{
sgm_flt_local += sgm[row*N + i + slice * N*H] * reconKernel[N - 1 - col + i];
}
sgm_flt[row*N + col + slice * N*V] = sgm_flt_local * du;
}
}
}
// Copy the sinogram data from one array(pointer) to another array(pointer). This is for "None" kernel reconstruction.
// sgm_flt: sinogram data after copy
// sgm: initial sinogram data
// N: sinogram width
// H: sinogram height
// V: number of views
// S: number of slices
__global__ void CopySinogram_device(float* sgm_flt, const float* sgm, const int N, const int H, const int V, const int S)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int slice = 0; slice < S; slice++)
{
sgm_flt[row * N + col + slice * N * V] = sgm[row * N + col + slice * N * V];
}
}
}
// backproject the image using pixel-driven method
// sgm: sinogram data
// img: image data
// U: each detector element position [mm]
// u: detector pixel array
// v: detector pixel array in z axis
// beta: view angle [radians]
// N: number of detector elements
// V: number of views
// totalScanAngle [rads]
// S: number of slices of the sinogram
// coneBeam: whether the recon is a cone-beam recon or not
// M: image dimension
// imgS: image slice count
// sdd: source to detector distance [mm]
// sid: source to isocenter distance [mm]
// du: detector element size [mm]
// dv: detector element height [mm]
// dx: image pixel size [mm]
// dz: image slice thickness [mm]
// (xc, yc, zc): image center position [mm, mm, mm]
__global__ void BackprojectPixelDriven_device(float* sgm, float* img, float* u, float* v, float* beta, bool shortScan, const int N, const int V, \
const int S, bool coneBeam, const int M, const int imgS, float* sdd_array, float* sid_array, float* offcenter_array, const float dx, const float dz, \
const float xc, const float yc, const float zc, int imgS_idx)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float du = u[1] - u[0];
float dv = v[1] - v[0];
if (col < M && row < M && imgS_idx <= imgS)
{
float x = (col - (M - 1) / 2.0f)*dx + xc;
float y = ((M - 1) / 2.0f - row)*dx + yc;
float z;
float U, u0, v0;
float mag_factor;
float w;
int k;
float w_z;//weight for cbct
int k_z;//index for cbct
float delta_beta;// delta_beta for the integral calculation (nonuniform scan angle)
float lower_row_val, upper_row_val;
for (int slice = imgS_idx; slice < imgS_idx + 1; slice++)
{
z = (slice - (float(imgS) - 1.0f) / 2.0f) * dz + zc;
// temporary local variable to speed up
float img_local = 0;
for (int view = 0; view < V; view++)
{
float offcenter_bias = offcenter_array[view] - offcenter_array[0];
float sid = sid_array[view];
float sdd = sdd_array[view];
//calculation of delta_beta for the integral calculation
if (view == 0)
delta_beta = abs(beta[1] - beta[0]);
else if (view == V - 1)
delta_beta = abs(beta[view] - beta[view - 1]);
else
delta_beta = abs(beta[view + 1] - beta[view - 1]) / 2.0f;
U = sid - x * cosf(beta[view]) - y * sinf(beta[view]);
//calculate the magnification
mag_factor = sdd / U;
// find u0
u0 = mag_factor * (x*sinf(beta[view]) - y * cosf(beta[view]));
k = floorf((u0 - (u[0] + offcenter_bias)) / du);
if (k<0 || k + 1>N - 1)
{
img_local = 0;
break;
}
w = (u0 - (u[k] + offcenter_bias)) / du;
// for cone beam ct, we also need to find v0
if (coneBeam && abs(dv) > 0.00001f)
{
v0 = mag_factor * z;
// weight for cbct recon
k_z = floorf((v0 - v[0]) / dv);
if (k_z<0 || k_z + 1>S - 1)
{
img_local = 0;
break;
}
w_z = (v0 - v[k_z]) / dv;
lower_row_val = (w*sgm[view*N + k + 1 + k_z * N*V] + (1 - w)*sgm[view*N + k + k_z * N*V]);
upper_row_val = (w*sgm[view*N + k + 1 + (k_z + 1) * N*V] + (1 - w)*sgm[view*N + k + (k_z + 1) * N*V]);
img_local += sid / U / U * (w_z*upper_row_val + (1 - w_z)*lower_row_val) * delta_beta;
}
else
{
img_local += sid / U / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]) * delta_beta;
}
}
//judge whether the scan is a full scan or a short scan
if (shortScan)
{
//printf("this is a short scan");
img[row*M + col] = img_local;
}
else
img[row*M + col] = img_local / 2.0f;
}
}
}
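// Explanatory note (added, not part of the original file): the kernel above is a
// pixel-driven fan-beam FBP backprojector. For an image pixel (x, y) and view angle beta:
//   U  = sid - x*cos(beta) - y*sin(beta)          (distance from source to the pixel along the central-ray direction)
//   u0 = (sdd / U) * (x*sin(beta) - y*cos(beta))  (detector coordinate of the pixel's projection)
// The filtered sinogram is linearly interpolated between detector samples k and k+1
// (and between rows k_z and k_z+1 in the cone-beam case) and accumulated as
//   img += (sid / U^2) * interpolated_value * delta_beta
// which is the discrete form of the fan-beam FBP integral; the final 1/2 factor for
// full scans accounts for every ray being measured twice over 360 degrees.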
__global__ void BackprojectPixelDriven_pmatrix_device(float* sgm, float* img, float* u, float* v, float* beta, float* pmatrix, \
bool shortScan, const int N, const int V, const int S, bool coneBeam, const int M, const int imgS, float* sdd_array, float* sid_array, \
const float dx, const float dz, const float xc, const float yc, const float zc, int imgS_idx, float imgRot)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float du = u[1] - u[0];
float imgRot_in_rad = imgRot * PI / 180.0f;
if (col < M && row < M && imgS_idx < imgS)
{
float x_after_rotation = (col - (M - 1) / 2.0f)*dx + xc;
float y_after_rotation = ((M - 1) / 2.0f - row)*dx + yc;
float x = x_after_rotation * cos(imgRot_in_rad) + y_after_rotation * sin(imgRot_in_rad);//(col - (M - 1) / 2.0f)*dx + xc;
float y = y_after_rotation * cos(imgRot_in_rad) - x_after_rotation * sin(imgRot_in_rad);//((M - 1) / 2.0f - row)*dx + yc;
float z;
float U;
float w;
int k;
float w_z;//weight for cbct
int k_z;//index for cbct
float delta_beta;// delta_beta for the integral calculation (nonuniform scan angle)
float lower_row_val, upper_row_val;
for (int slice = imgS_idx; slice < imgS_idx + 1; slice++)
{
z = (slice - (float(imgS) - 1.0f) / 2.0f) * dz + zc;
// temporary local variable to speed up
float img_local = 0;
for (int view = 0; view < V; view++)
{
float sid = sid_array[view];
float sdd = sdd_array[view];
//calculation of delta_beta for the integral calculation
if (view == 0)
delta_beta = abs(beta[1] - beta[0]);
else if (view == V - 1)
delta_beta = abs(beta[view] - beta[view - 1]);
else
delta_beta = abs(beta[view + 1] - beta[view - 1]) / 2.0f;
//use pmatrix to calculate the corresponding index on the detector
int pos_in_matrix = 12 * view;
float k_u_divide_mag = pmatrix[pos_in_matrix] * x + pmatrix[pos_in_matrix + 1] * y + pmatrix[pos_in_matrix + 2] * z + pmatrix[pos_in_matrix + 3] * 1;
float one_divide_mag = pmatrix[pos_in_matrix + 8] * x + pmatrix[pos_in_matrix + 9] * y + pmatrix[pos_in_matrix + 10] * z + pmatrix[pos_in_matrix + 11] * 1;
//the pmatrix is calculated when the detector is binned by 4 pixels
// each detector element after binning is 0.4 mm wide
float k_f_bin_4 = k_u_divide_mag / one_divide_mag;//float number of k_f_bin_4
float u_position_true = (k_f_bin_4 + 0.5f)*0.4;
float k_f = u_position_true / du - 0.5f;
//float k_f = k_u_divide_mag / one_divide_mag;//float number of k
k = floorf(k_f);
//the pmatrix is acquired assuming beta[0]=0
//however, in a real recon, the image needs to be rotated
//we need to retrieve the beta value for the pmatrix recon
//for calculation of U
float beta_pmatrix = beta[view] - beta[0];
U = sid - x * cosf(beta_pmatrix) - y * sinf(beta_pmatrix);
if (k<0 || k + 1>N - 1)
{
img_local = 0;
break;
}
w = k_f - k;
// for cone beam ct, we also need to find v0
if (coneBeam)
{
float k_z_divide_mag = pmatrix[pos_in_matrix + 4] * x + pmatrix[pos_in_matrix + 5] * y + pmatrix[pos_in_matrix + 6] * z + pmatrix[pos_in_matrix + 7] * 1;
float k_z_f = k_z_divide_mag / one_divide_mag;//float number of k_z
k_z = floorf(k_z_f);
if (k_z<0 || k_z + 1>S - 1)
{
img_local = 0;
break;
}
w_z = k_z_f - k_z;
lower_row_val = (w*sgm[view*N + k + 1 + k_z * N*V] + (1 - w)*sgm[view*N + k + k_z * N*V]);
upper_row_val = (w*sgm[view*N + k + 1 + (k_z + 1) * N*V] + (1 - w)*sgm[view*N + k + (k_z + 1) * N*V]);
img_local += sid / U / U * (w_z*upper_row_val + (1 - w_z)*lower_row_val) * delta_beta;
}
else
{
img_local += sid / U / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]) * delta_beta;
}
}
//judge whether the scan is a full scan or a short scan
if (shortScan)
{
//printf("this is a short scan");
img[row*M + col] = img_local;
}
else
{
img[row*M + col] = img_local / 2.0f;
}
}
}
}
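// Explanatory note (added, not part of the original file): in the pmatrix variant above,
// each view stores a 3x4 projection matrix P (12 floats). For a voxel with homogeneous
// coordinates X = (x, y, z, 1):
//   row 0 of P gives  k_u / mag   (detector column index divided by magnification)
//   row 1 of P gives  k_v / mag   (detector row index divided by magnification)
//   row 2 of P gives  1 / mag
// so k_u = (P0.X)/(P2.X) and k_v = (P1.X)/(P2.X). Because the matrices were calibrated
// on a 4x-binned detector with 0.4 mm pixels, the column index is first converted to a
// physical u position and then rescaled to the current detector element size du.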
// backproject the image using pixel-driven method for Hilbert kernel (for phase contrast imaging)
// sgm: sinogram data
// img: image data
// U: each detector element position [mm]
// beta: view angle [radians]
// N: number of detector elements
// V: number of views
// S: number of slices
// M: image dimension
// sdd: source to detector distance [mm]
// sid: source to isocenter distance [mm]
// du: detector element size [mm]
// dx: image pixel size [mm]
// (xc, yc): image center position [mm, mm]
__global__ void BackprojectPixelDrivenHilbert_device(float* sgm, float* img, float* u, float* beta, const int N, const int V, \
const int S, const int M, const float sdd, const float sid, const float du, const float dx, const float xc, const float yc, int imgS_idx)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < M && row < M && imgS_idx < S)
{
float x = (col - (M - 1) / 2.0f)*dx + xc;
float y = ((M - 1) / 2.0f - row)*dx + yc;
float U, u0;
float w;
int k;
for (int slice = imgS_idx; slice < imgS_idx + 1; slice++)
{
img[row*M + col] = 0;
for (int view = 0; view < V; view++)
{
U = sid - x * cosf(beta[view]) - y * sinf(beta[view]);
u0 = sdd * (x*sinf(beta[view]) - y * cosf(beta[view])) / U;
k = floorf((u0 - u[0]) / du);
if (k<0 || k + 1>N - 1)
{
img[row*M + col] = 0;
break;
}
w = (u0 - u[k]) / du;
img[row*M + col] += 1 / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]);
}
img[row*M + col] *= PI / V;
}
}
}
void InitializeDistance_Agent(float* &distance_array, const float distance, const int V)
{
if (distance_array != nullptr)
hipFree(distance_array);
hipMalloc((void**)&distance_array, V * sizeof(float));
InitDistance << <(V + 511) / 512, 512 >> > (distance_array, distance, V);
}
void InitializeNonuniformSDD_Agent(float* &distance_array, const int V, const std::string& distanceFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (distance_array != nullptr)
hipFree(distance_array);
hipMalloc((void**)&distance_array, V * sizeof(float));
float* distance_array_cpu = new float[V];
std::ifstream ifs(distanceFile);
if (!ifs)
{
printf("\nCannot find SDD information file '%s'!\n", distanceFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value distance_jsonc_value;
if (doc.HasMember("SourceDetectorDistance"))
{
distance_jsonc_value = doc["SourceDetectorDistance"];
}
else if(doc.HasMember("Value"))//a new version of the program uses value as member to avoid complex member names
{
distance_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find Value member in jsonc file!\n");
exit(-2);
}
if (distance_jsonc_value.Size() != V)
{
printf("\nNumber of sdd values is %d while the number of Views is %d!\n", distance_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < distance_jsonc_value.Size(); i++)
{
distance_array_cpu[i] = distance_jsonc_value[i].GetFloat();
}
hipMemcpy(distance_array, distance_array_cpu, V * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
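// Illustrative input sketch (added, not part of the original file): based on the parsing
// above, the per-view SDD file is a jsonc document whose "SourceDetectorDistance" (or,
// in newer files, "Value") member is an array with exactly V floats, e.g. for V = 4:
//   { "Value": [1100.0, 1100.2, 1099.8, 1100.1] }
// Comments and trailing commas are tolerated because the document is parsed with
// kParseCommentsFlag | kParseTrailingCommasFlag.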
void InitializeNonuniformSID_Agent(float* &distance_array, const int V, const std::string& distanceFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (distance_array != nullptr)
hipFree(distance_array);
hipMalloc((void**)&distance_array, V * sizeof(float));
float* distance_array_cpu = new float[V];
std::ifstream ifs(distanceFile);
if (!ifs)
{
printf("\nCannot find SID information file '%s'!\n", distanceFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value distance_jsonc_value;
if (doc.HasMember("SourceIsocenterDistance"))
{
distance_jsonc_value = doc["SourceIsocenterDistance"];
}
else if (doc.HasMember("Value"))//a new version of the program uses value as member to avoid complex member names
{
distance_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find SourceIsocenterDistance or Value member in jsonc file!\n");
exit(-2);
}
if (distance_jsonc_value.Size() != V)
{
printf("\nNumber of sid values is %d while the number of Views is %d!\n", distance_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < distance_jsonc_value.Size(); i++)
{
distance_array_cpu[i] = distance_jsonc_value[i].GetFloat();
}
hipMemcpy(distance_array, distance_array_cpu, V * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void InitializeNonuniformOffCenter_Agent(float* &distance_array, const int V, const std::string& distanceFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (distance_array != nullptr)
hipFree(distance_array);
hipMalloc((void**)&distance_array, V * sizeof(float));
float* distance_array_cpu = new float[V];
std::ifstream ifs(distanceFile);
if (!ifs)
{
printf("\nCannot find Offcenter information file '%s'!\n", distanceFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value distance_jsonc_value;
if (doc.HasMember("OffcenterArray"))
{
distance_jsonc_value = doc["OffcenterArray"];
}
else if (doc.HasMember("Value"))//a new version of the program uses value as member to avoid complex member names
{
distance_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find OffcenterArray or Value member in jsonc file!\n");
exit(-2);
}
if (distance_jsonc_value.Size() != V)
{
printf("\nNumber of offcenter values is %d while the number of Views is %d!\n", distance_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < distance_jsonc_value.Size(); i++)
{
distance_array_cpu[i] = distance_jsonc_value[i].GetFloat();
}
hipMemcpy(distance_array, distance_array_cpu, V * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void InitializePMatrix_Agent(float* &pmatrix_array, const int V, const std::string& pmatrixFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (pmatrix_array != nullptr)
hipFree(pmatrix_array);
//hipMallocManaged((void**)&pmatrix_array, 12 * V * sizeof(float));
hipMalloc((void**)&pmatrix_array, 12 * V * sizeof(float));
//hipMallocManaged somehow does not work for this function
//so hipMalloc and hipMemcpy are used instead
float* pmatrix_array_cpu = new float[12 * V];
std::ifstream ifs(pmatrixFile);
if (!ifs)
{
printf("\nCannot find pmatrix information file '%s'!\n", pmatrixFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value pmatrix_jsonc_value;
if (doc.HasMember("PMatrix"))
{
pmatrix_jsonc_value = doc["PMatrix"];
}
else if(doc.HasMember("Value"))
{
pmatrix_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find PMatrix or Value member in jsonc file!\n");
exit(-2);
}
if (pmatrix_jsonc_value.Size() != 12 * V)
{
printf("\nNumber of pmatrix elements is %d while the 12 times number of Views is %d!\n", pmatrix_jsonc_value.Size(), 12 * V);
exit(-2);
}
for (unsigned i = 0; i < 12 * V; i++)
{
//printf("\n%d: %f",i, pmatrix_jsonc_value[i].GetFloat());
pmatrix_array_cpu[i] = pmatrix_jsonc_value[i].GetFloat();
}
hipMemcpy(pmatrix_array, pmatrix_array_cpu, 12 * V * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void InitializeU_Agent(float* &u, const int N, const float du, const float offcenter)
{
if (u != nullptr)
hipFree(u);
hipMalloc((void**)&u, N * sizeof(float));
InitU << <(N + 511) / 512, 512 >> > (u, N, du, offcenter);
}
void InitializeBeta_Agent(float* &beta, const int V, const float rotation, const float totalScanAngle)
{
if (beta != nullptr)
hipFree(beta);
hipMalloc((void**)&beta, V * sizeof(float));
InitBeta << < (V + 511) / 512, 512 >> > (beta, V, rotation, totalScanAngle);
}
void InitializeNonuniformBeta_Agent(float* &beta, const int V, const float rotation, const std::string& scanAngleFile)
//unit of beta is radians
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (beta != nullptr)
hipFree(beta);
hipMalloc((void**)&beta, V * sizeof(float));
float* beta_cpu = new float[V];
std::ifstream ifs(scanAngleFile);
if (!ifs)
{
printf("Cannot find angle information file '%s'!\n", scanAngleFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value scan_angle_jsonc_value;
if (doc.HasMember("ScanAngle"))
{
scan_angle_jsonc_value = doc["ScanAngle"];
}
else if (doc.HasMember("Value"))
{
scan_angle_jsonc_value = doc["Value"];
}
else
{
printf("Did not find ScanAngle or Value member in jsonc file!\n");
exit(-2);
}
if (scan_angle_jsonc_value.Size() != V)
{
printf("Number of scan angles is %d while the number of Views is %d!\n", scan_angle_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < scan_angle_jsonc_value.Size(); i++)
{
beta_cpu[i] = rotation / 180.0f*PI + scan_angle_jsonc_value[i].GetFloat() / 180.0*PI;
}
hipMemcpy(beta, beta_cpu, sizeof(float)*V, hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void InitializeReconKernel_Agent(float* &reconKernel, const int N, const float du, const std::string& kernelName, const std::vector<float>& kernelParam)
{
if (reconKernel != nullptr)
hipFree(reconKernel);
hipMalloc((void**)&reconKernel, (2 * N - 1) * sizeof(float));
if (kernelName == "HammingFilter")
{
InitReconKernel_Hamming << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
if (kernelName == "Delta")
{
InitReconKernel_Delta << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
else if (kernelName == "QuadraticFilter")
{
float lastParam = 0.0f;
if (kernelParam.size() == 3)
lastParam = kernelParam[2];
InitReconKernel_Quadratic << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, int(kernelParam.size()), kernelParam[0], kernelParam[1], lastParam);
}
else if (kernelName == "Polynomial")
{
// TODO:
// InitReconKernel_Polynomial <<<...>>> (...);
float p[7] = { 0 };
for (size_t i = 0; i < kernelParam.size(); i++)
{
p[i] = kernelParam[kernelParam.size() - 1 - i];
}
//InitReconKernel_Polynomial <<<(2 * N - 1 + 511) / 512, 512>>> (reconKernel, N, du, p[0], p[1], p[2], p[3], p[4], p[5], p[6]);
InitReconKernel_Polynomial << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, p[6], p[5], p[4], p[3], p[2], p[1], p[0]);
}
else if (kernelName == "Hilbert" || kernelName == "Hilbert_angle")
{
InitReconKernel_Hilbert << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
else if (kernelName == "GaussianApodizedRamp")
{
InitReconKernel_GaussianApodized << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
else if (kernelName == "None")
{
// Do not need to do anything
}
}
void MallocManaged_Agent(float * &p, const int size)
{
hipMallocManaged((void**)&p, size);
}
void CorrectBeamHardening_Agent(float* sgm, mango::Config & config)
{
dim3 grid((config.sgmWidth + 15) / 16, (config.sgmHeight + 15) / 16);
dim3 block(16, 16);
CorrectBeamHardening_device << <grid, block >> > (sgm, config.sgmWidth, config.sgmHeight, config.sliceCount, config.beamHardening[0], config.beamHardening[1], config.beamHardening[2], config.beamHardening[3], config.beamHardening[4], config.beamHardening[5], config.beamHardening[6], config.beamHardening[7], config.beamHardening[8], config.beamHardening[9]);
hipDeviceSynchronize();
}
void FilterSinogram_Agent(float * sgm, float* sgm_flt, float* reconKernel, float* u, mango::Config & config, float* beta, float * sdd_array, float * offcenter_array)
{
// Step 1: weight the sinogram
dim3 grid((config.sgmWidth + 15) / 16, (config.sgmHeight + 15) / 16);
dim3 block(16, 16);
// Hilbert kernel for phase contrast imaging
if (config.kernelName == "Hilbert")
WeightSinogramHilbert_device << <grid, block >> > (sgm, u, config.sgmWidth, config.sgmHeight, config.sliceCount, config.sdd);
else if (config.kernelName == "Hilbert_angle")
{
printf("Kernel name: %s\n", config.kernelName);
WeightSinogramHilbert_angle_device << <grid, block >> > (sgm, u, config.sgmWidth, config.sgmHeight, config.sliceCount, config.sdd);
}
else if (config.kernelName == "None")
{
// Do not weight the sinogram(sgm)
}
// Common attenuation imaging
else
WeightSinogram_device << <grid, block >> > (sgm, u, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, \
config.sliceThickness, config.sliceOffcenter, sdd_array, config.totalScanAngle, config.shortScan, beta, offcenter_array);
hipDeviceSynchronize();
// Step 2: convolve the sinogram
if (config.kernelName == "GaussianApodizedRamp")
{
// if the Gaussian apodized kernel is used, the sinogram needs to be filtered twice
// first by the ramp filter, then by the Gaussian filter
float du = config.detEltSize;
float * reconKernel_ramp;
hipMalloc((void**)&reconKernel_ramp, (2 * config.sgmWidth - 1) * sizeof(float));
InitReconKernel_Hamming << <(2 * config.sgmWidth - 1 + 511) / 512, 512 >> > (reconKernel_ramp, config.sgmWidth, du, 1);
hipDeviceSynchronize();
//intermediate filtering result is saved in sgm_flt_ramp
float *sgm_flt_ramp;
//MallocManaged_Agent(sgm_flt_ramp, config.sgmWidth*config.views*config.sliceCount * sizeof(float));
hipMalloc((void**)& sgm_flt_ramp, config.sgmWidth * config.views * config.sliceCount * sizeof(float));
ConvolveSinogram_device << <grid, block >> > (sgm_flt_ramp, sgm, reconKernel_ramp, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, u, config.detEltSize);
hipDeviceSynchronize();
//the height of the filtered sinogram shrinks to the number of views, so the convolution parameters need to be adjusted accordingly
ConvolveSinogram_device << <grid, block >> > (sgm_flt, sgm_flt_ramp, reconKernel, config.sgmWidth, config.views, config.views, config.sliceCount, u, config.detEltSize);
hipDeviceSynchronize();
// free temporary memory
hipFree(reconKernel_ramp);
hipFree(sgm_flt_ramp);
}
else if (config.kernelName == "None")
{
// Do not perform convolution, just directly copy the data
hipLaunchKernelGGL(( CopySinogram_device) , dim3(grid), dim3(block) , 0, 0, sgm_flt, sgm, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount);
hipDeviceSynchronize();
}
else
{
ConvolveSinogram_device << <grid, block >> > (sgm_flt, sgm, reconKernel, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, u, config.detEltSize);
hipDeviceSynchronize();
}
}
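// Explanatory note (added, not part of the original file): for the "GaussianApodizedRamp"
// kernel the filtering above is done in two passes -- convolve with a Hamming kernel at
// t = 1 (a pure ramp) and then with the normalized Gaussian -- which, up to boundary
// truncation, is equivalent to a single convolution with a Gaussian-apodized ramp kernel
// because discrete convolution is associative.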
void BackprojectPixelDriven_Agent(float * sgm_flt, float * img, float* sdd_array, float* sid_array, float* offcenter_array, float* pmatrix_array, float * u, float *v, float* beta, mango::Config & config, int z_idx)
{
dim3 grid((config.imgDim + 15) / 16, (config.imgDim + 15) / 16);
dim3 block(16, 16);
// Hilbert kernel for phase contrast imaging
if (config.kernelName == "Hilbert" || config.kernelName == "Hilbert_angle")
{
BackprojectPixelDrivenHilbert_device << <grid, block >> > (sgm_flt, img, u, beta, config.sgmWidth, config.views, \
config.sliceCount, config.imgDim, config.sdd, config.sid, config.detEltSize, config.pixelSize, config.xCenter, config.yCenter, z_idx);
}
// Common attenuation imaging
else if (config.pmatrixFlag == false)// if pmatrix is not applied
{
BackprojectPixelDriven_device << <grid, block >> > (sgm_flt, img, u, v, beta, config.shortScan, config.sgmWidth, config.views, \
config.sliceCount, config.coneBeam, config.imgDim, config.imgSliceCount, sdd_array, sid_array, offcenter_array, config.pixelSize, config.imgSliceThickness, \
config.xCenter, config.yCenter, config.zCenter, z_idx);
}
else if (config.pmatrixFlag == true)// if pmatrix is applied
{
BackprojectPixelDriven_pmatrix_device << <grid, block >> > (sgm_flt, img, u, v, beta, pmatrix_array, config.shortScan, config.sgmWidth, config.views, \
config.sliceCount, config.coneBeam, config.imgDim, config.imgSliceCount, sdd_array, sid_array, config.pixelSize, config.imgSliceThickness, \
config.xCenter, config.yCenter, config.zCenter, z_idx, config.imgRot);
}
hipDeviceSynchronize();
}
void SaveReconImageSlice(const char* filename, float* rec_image, int z_idx, const mango::Config& config)
{
FILE* fp = NULL;
if (z_idx == 0)
fp = fopen(filename, "wb");
else
fp = fopen(filename, "ab");
if (fp == NULL)
{
fprintf(stderr, "Cannot save to file %s!\n", filename);
exit(4);
}
fwrite(rec_image, sizeof(float), config.imgDim*config.imgDim, fp);
fclose(fp);
}
void FreeMemory_Agent(float* &p)
{
hipFree(p);
p = nullptr;
} | 81187e23a115a2d409d82eb250a980c3d490193e.cu | #include "FbpClass_Agent.cuh"
#include <stdio.h>
#include "stdafx.h"
#define PI 3.1415926536f
__global__ void InitDistance(float *distance_array, const float distance, const int V)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < V)
{
distance_array[tid] = distance;
}
}
__global__ void InitU(float* u, const int N, const float du, const float offcenter)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < N)
{
u[tid] = (tid - (N - 1) / 2.0f) * du + offcenter;
}
}
__global__ void InitBeta(float* beta, const int V, const float rotation, const float totalScanAngle)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < V)
{
beta[tid] = (totalScanAngle / V * tid + rotation) * PI / 180;
}
}
__global__ void InitReconKernel_Hamming(float* reconKernel, const int N, const float du, const float t)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 2 * N - 1)
{
// the center element index is N-1
int n = tid - (N - 1);
// ramp part
if (n == 0)
reconKernel[tid] = t / (4 * du*du);
else if (n % 2 == 0)
reconKernel[tid] = 0;
else
reconKernel[tid] = -t / (n*n * PI*PI * du*du);
// cosine part
int sgn = n % 2 == 0 ? 1 : -1;
reconKernel[tid] += (1 - t)* (sgn / (2 * PI*du*du) * (1.0f / (1 + 2 * n) + 1.0f / (1 - 2 * n))
- 1 / (PI*PI*du*du) * (1.0f / (1 + 2 * n) / (1 + 2 * n) + 1.0f / (1 - 2 * n) / (1 - 2 * n)));
}
}
__global__ void InitReconKernel_Delta(float* reconKernel, const int N, const float du, const float t)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 2 * N - 1)
{
// the center element index is N-1
int n = tid - (N - 1);
if (n == 0)
reconKernel[tid] = t;
else
reconKernel[tid] = 0;
}
}
//initialize a Gaussian kernel
//This kernel will be used along with the ramp kernel
//delta is in number of pixels, which is the standard deviation of the gaussian
//This kernel is normalized
__global__ void InitReconKernel_GaussianApodized(float* reconKernel, const int N, const float du, const float delta)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 1)
{
// the center element index is N-1
float temp_sum = 0;
for (int i = 0; i < 2 * N - 1; i++)
{
int n = i - (N - 1);
reconKernel[i] = exp( - float(n) * float(n) / 2.0 / delta / delta);
temp_sum = temp_sum + reconKernel[i];
}
for (int i = 0; i < 2 * N - 1; i++)
{
reconKernel[i] = reconKernel[i] / temp_sum / du;
}
}
}
__global__ void InitReconKernel_Quadratic(float* reconKernel, const int N, const float du, const int paramNum, const float p1, const float p2, const float p3)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < 2 * N - 1)
{
float a, b, c;
float kn = 1 / (2 * du);
if (paramNum == 2)
{
// p1 = t, p2 = h, p3 is ignored
a = (p2 - 1) / (kn*kn * (1 - 2 * p1));
b = -2 * a*p1*kn;
c = 1.0f;
}
else
{
a = p1;
b = p2;
c = p3;
}
reconKernel[idx] = 0.0f;
float du2 = du * du;
float du3 = du2 * du;
float du4 = du3 * du;
int n = idx - (N - 1);
if (n == 0)
{
// H3(x)
reconKernel[idx] += a / 32 / du4;
// H2(x)
reconKernel[idx] += b / 12 / du3;
// H1(x)
reconKernel[idx] += c / 4 / du2;
}
else if (n % 2 == 0)
{
// H3(x)
reconKernel[idx] += a * 3 / (8 * n*n * PI*PI * du4);
// H2(x)
reconKernel[idx] += b / (2 * n*n * PI*PI * du3);
// H1(x)
// do nothing, H1(even) is zero
}
else
{
// H3(x)
reconKernel[idx] += a * 3 / (8 * n*n * PI*PI * du4) * (4 / (n*n*PI*PI) - 1);
// H2(x)
reconKernel[idx] += -b / (2 * n*n * PI*PI * du3);
// H1(x)
reconKernel[idx] += -c / (n*n * PI*PI * du2);
}
}
}
__global__ void InitReconKernel_Polynomial(float* reconKernel, const int N, const float du, const float p6, const float p5, const float p4, const float p3, const float p2, const float p1, const float p0)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < 2 * N - 1)
{
int n = idx - (N - 1);
reconKernel[idx] = 0.0f;
float kn = 1 / (2 * du);
float du2 = du * du;
float du3 = du2 * du;
float du4 = du3 * du;
if (n == 0)
{
// H7(x)
reconKernel[idx] += p6 * powf(kn, 8) / 4;
// H6(x)
reconKernel[idx] += p5 * powf(kn, 7) * 2 / 7;
// H5(x)
reconKernel[idx] += p4 * powf(kn, 6) / 3;
// H4(x)
reconKernel[idx] += p3 * powf(kn, 5) * 2 / 5;
// H3(x)
reconKernel[idx] += p2 * powf(kn, 4) / 2;
// H2(x)
reconKernel[idx] += p1 * 2 * kn*kn*kn / 3;
// H1(x)
reconKernel[idx] += p0 * kn*kn;
}
else if (n % 2 == 0)
{
// H7(x)
reconKernel[idx] += p6 * 7 * (360 - 30 * n*n*PI*PI + powf(n*PI, 4)) / (128 * du2* powf(du*n*PI, 6));
// H6(x)
reconKernel[idx] += p5 * 3 * (120 - 20 * n*n*PI*PI + powf(n*PI, 4)) / (32 * du*powf(du*n*PI, 6));
// H5(x)
reconKernel[idx] += p4 * 5 * (n*n*PI*PI - 12) / (32 * du2 *powf(du*n*PI, 4));
// H4(x)
reconKernel[idx] += p3 * (n*n*PI*PI - 6) / (4 * du * powf(du*n*PI, 4));
// H3(x)
reconKernel[idx] += p2 * 3 / (8 * du4 * n*n * PI*PI);
// H2(x)
reconKernel[idx] += p1 / (2 * n*n *PI*PI * du3);
// H1(x)
// do nothing, H1(even) is zero
}
else
{
// H7(x)
reconKernel[idx] += p6 * 7 * (1440 - 360 * n*n*PI*PI + 30 * powf(n*PI, 4) - powf(n*PI, 6)) / (128 * powf(du*n*PI, 8));
// H6(x)
reconKernel[idx] += -p5 * 3 * (120 - 20 * n*n*PI*PI + powf(n*PI, 4)) / (32 * du*powf(du*n*PI, 6));
// H5(x)
reconKernel[idx] += -p4 * 5 * (48 - 12 * n*n*PI*PI + powf(n*PI, 4)) / (32 * powf(du*n*PI, 6));
// H4(x)
reconKernel[idx] += p3 * (6 - n * n*PI*PI) / (4 * du * powf(du*n*PI, 4));
// H3(x)
reconKernel[idx] += p2 * (4 - n * n*PI*PI) * 3 / (8 * powf(du*n*PI, 4));
// H2(x)
reconKernel[idx] += -p1 / (2 * n*n *PI*PI * du3);
// H1(x)
reconKernel[idx] += -p0 / (n*n *PI*PI * du2);
}
}
}
__global__ void InitReconKernel_Hilbert(float* reconKernel, const int N, const float du, const float t)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < 2 * N - 1)
{
int n = tid - (N - 1);
if (n % 2 == 0)
reconKernel[tid] = 0;
else
{
reconKernel[tid] = 1 / (PI * PI * n * du);
if (t < 0)
reconKernel[tid] = -reconKernel[tid];
}
}
}
// weight the sinogram data
// sgm: sinogram (width x height x slice)
// N: width
// H: height
// V: views
// S: slice
// sliceThickness: mm
// sliceOffcenter: mm
// sdd: source to detector distance
// totalScanAngle
__global__ void WeightSinogram_device(float* sgm, const float* u, const int N, const int H, const int V, \
const int S, const float sliceThickness, const float sliceOffcenter, float* sdd_array, float totalScanAngle, bool shortScan, float *beta_array, float* offcenter_array)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
float offcenter_bias = offcenter_array[row] - offcenter_array[0];
float u_actual = u[col] + offcenter_bias;//actual u value due to non uniform offcenter
float sdd = sdd_array[row];
for (int i = 0; i < S; i++)
{
float v = sliceThickness * (i - (float(S) / 2.0f + 0.5)) + sliceOffcenter;
sgm[row*N + col + i * N*H] *= sdd * sdd / sqrtf((u_actual)*(u_actual)+sdd * sdd + v * v);
//the loop is to include all the slices since there may be more than one slice
}
if (shortScan)
{
//this beta is different from the beta_array
//To calculate the Parker weighting, beta should begin at zero degrees
//while the betaArray includes the start rotation angle
//adding abs function to deal with the case when totalScanAngle is negative
float beta = abs(beta_array[row] - beta_array[0]);
float rotation_direction = abs(totalScanAngle) / (totalScanAngle);
float gamma = atan(u_actual / sdd) * rotation_direction;
//float beta = abs(totalScanAngle/ 180.0f * PI ) / float(V) * float(row) ;
//float gamma = abs(totalScanAngle) / totalScanAngle * atan(u[col] / sdd);
float gamma_max = abs(totalScanAngle) * PI / 180.0f - PI;
//calculation of the parker weighting
float weighting = 0;
if (beta >= 0 && beta < gamma_max - 2 * gamma)
{
weighting = sin(PI / 2 * beta / (gamma_max - 2 * gamma));
weighting = weighting * weighting;
}
else if (beta >= gamma_max - 2 * gamma && beta < PI - 2 * gamma)
{
weighting = 1;
}
else if (beta >= PI - 2 * gamma && beta <= PI + gamma_max)
{
weighting = sin(PI / 2 * (PI + gamma_max - beta) / (gamma_max + 2 * gamma));
weighting = weighting * weighting;
}
else
{
//printf("ERROR!");
}
for (int i = 0; i < S; i++)
{
sgm[row*N + col + i * N*H] *= weighting;
}
}
else
{
;
}
}
}
// weight the sinogram data of Hilbert kernel (for phase contrast imaging)
// sgm: sinogram (width x height x slice)
// N: width
// V: height (views)
// S: slice
// sdd: source to detector distance
__global__ void WeightSinogramHilbert_device(float* sgm, const float* u, const int N, const int V, const int S, float sdd)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int i = 0; i < S; i++)
{
sgm[row*N + col + i * N*V] *= sqrtf(u[col] * u[col] + sdd * sdd);
}
}
}
// weight the sinogram data of Hilbert kernel (for phase contrast imaging) along angle direction (temporary test)
// sgm: sinogram (width x height x slice)
// N: width
// V: height (views)
// S: slice
// sdd: source to detector distance
__global__ void WeightSinogramHilbert_angle_device(float* sgm, const float* u, const int N, const int V, const int S, float sdd)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int i = 0; i < S; i++)
{
sgm[row*N + col + i * N*V] *= sdd / sqrtf(u[col] * u[col] + sdd * sdd);
}
}
}
// perform beam hardening correction of sinogram
// sgm: sinogram (width x height x slice)
// N: width
// V: height (views)
// S: slice
// p0-p9: correction parameters
__global__ void CorrectBeamHardening_device(float* sgm, const int N, const int V, const int S, float p0, float p1, float p2, float p3, float p4, float p5, float p6, float p7, float p8, float p9)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int i = 0; i < S; i++)
{
float oldSgm = sgm[row*N + col + i * N*V];
sgm[row*N + col + i * N*V] = p0 + p1 * powf(oldSgm, 1) + p2 * powf(oldSgm, 2) + p3 * powf(oldSgm, 3) + p4 * powf(oldSgm, 4) + p5 * powf(oldSgm, 5) + p6 * powf(oldSgm, 6) + p7 * powf(oldSgm, 7) + p8 * powf(oldSgm, 8) + p9 * powf(oldSgm, 9);
}
}
}
// convolve the sinogram data
// sgm_flt: sinogram data after convolving
// sgm: initial sinogram data
// reconKernel: reconstruction kernel
// N: sinogram width
// H: sinogram height
// V: number of views
// S: number of slices
// u: the position (coordinate) of each detector element
// du: detector element size [mm]
__global__ void ConvolveSinogram_device(float* sgm_flt, const float* sgm, float* reconKernel, const int N, const int H, const int V, const int S, const float* u, const float du)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int slice = 0; slice < S; slice++)
{
// temporary variable to speed up
float sgm_flt_local = 0;
for (int i = 0; i < N; i++)
{
sgm_flt_local += sgm[row*N + i + slice * N*H] * reconKernel[N - 1 - col + i];
}
sgm_flt[row*N + col + slice * N*V] = sgm_flt_local * du;
}
}
}
// Copy the sinogram data from one array(pointer) to another array(pointer). This is for "None" kernel reconstruction.
// sgm_flt: sinogram data after copy
// sgm: initial sinogram data
// N: sinogram width
// H: sinogram height
// V: number of views
// S: number of slices
__global__ void CopySinogram_device(float* sgm_flt, const float* sgm, const int N, const int H, const int V, const int S)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && row < V)
{
for (int slice = 0; slice < S; slice++)
{
sgm_flt[row * N + col + slice * N * V] = sgm[row * N + col + slice * N * V];
}
}
}
// backproject the image using pixel-driven method
// sgm: sinogram data
// img: image data
// U: each detector element position [mm]
// u: detector pixel array
// v: detector pixel array in z axis
// beta: view angle [radians]
// N: number of detector elements
// V: number of views
// totalScanAngle [rads]
// S: number of slices of the sinogram
// coneBeam: whether the recon is a cone-beam recon or not
// M: image dimension
// imgS: image slice count
// sdd: source to detector distance [mm]
// sid: source to isocenter distance [mm]
// du: detector element size [mm]
// dv: detector element height [mm]
// dx: image pixel size [mm]
// dz: image slice thickness [mm]
// (xc, yc, zc): image center position [mm, mm, mm]
__global__ void BackprojectPixelDriven_device(float* sgm, float* img, float* u, float* v, float* beta, bool shortScan, const int N, const int V, \
const int S, bool coneBeam, const int M, const int imgS, float* sdd_array, float* sid_array, float* offcenter_array, const float dx, const float dz, \
const float xc, const float yc, const float zc, int imgS_idx)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float du = u[1] - u[0];
float dv = v[1] - v[0];
if (col < M && row < M && imgS_idx <= imgS)
{
float x = (col - (M - 1) / 2.0f)*dx + xc;
float y = ((M - 1) / 2.0f - row)*dx + yc;
float z;
float U, u0, v0;
float mag_factor;
float w;
int k;
float w_z;//weight for cbct
int k_z;//index for cbct
float delta_beta;// delta_beta for the integral calculation (nonuniform scan angle)
float lower_row_val, upper_row_val;
for (int slice = imgS_idx; slice < imgS_idx + 1; slice++)
{
z = (slice - (float(imgS) - 1.0f) / 2.0f) * dz + zc;
// temporary local variable to speed up
float img_local = 0;
for (int view = 0; view < V; view++)
{
float offcenter_bias = offcenter_array[view] - offcenter_array[0];
float sid = sid_array[view];
float sdd = sdd_array[view];
//calculation of delta_beta for the integral calculation
if (view == 0)
delta_beta = abs(beta[1] - beta[0]);
else if (view == V - 1)
delta_beta = abs(beta[view] - beta[view - 1]);
else
delta_beta = abs(beta[view + 1] - beta[view - 1]) / 2.0f;
U = sid - x * cosf(beta[view]) - y * sinf(beta[view]);
//calculate the magnification
mag_factor = sdd / U;
// find u0
u0 = mag_factor * (x*sinf(beta[view]) - y * cosf(beta[view]));
k = floorf((u0 - (u[0] + offcenter_bias)) / du);
if (k<0 || k + 1>N - 1)
{
img_local = 0;
break;
}
w = (u0 - (u[k] + offcenter_bias)) / du;
// for cone beam ct, we also need to find v0
if (coneBeam && abs(dv) > 0.00001f)
{
v0 = mag_factor * z;
// weight for cbct recon
k_z = floorf((v0 - v[0]) / dv);
if (k_z<0 || k_z + 1>S - 1)
{
img_local = 0;
break;
}
w_z = (v0 - v[k_z]) / dv;
lower_row_val = (w*sgm[view*N + k + 1 + k_z * N*V] + (1 - w)*sgm[view*N + k + k_z * N*V]);
upper_row_val = (w*sgm[view*N + k + 1 + (k_z + 1) * N*V] + (1 - w)*sgm[view*N + k + (k_z + 1) * N*V]);
img_local += sid / U / U * (w_z*upper_row_val + (1 - w_z)*lower_row_val) * delta_beta;
}
else
{
img_local += sid / U / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]) * delta_beta;
}
}
			//scale the result depending on whether the scan is a short scan or a full scan
			if (shortScan)
			{
				img[row*M + col] = img_local;
			}
			else
				img[row*M + col] = img_local / 2.0f; // a full scan measures every ray twice, so halve the accumulated value
}
}
}
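// The accumulation above is the discretized fan-beam FBP formula
//     img(x, y) = sum over views of [ sid / U^2 * sgm_filtered(u0, view) * delta_beta ],
// where U = sid - x*cos(beta) - y*sin(beta) is the pixel-to-source distance measured along the
// central ray direction, u0 = (sdd / U) * (x*sin(beta) - y*cos(beta)) is the detector coordinate
// hit by the ray through (x, y), and sgm_filtered(u0, view) is obtained by linear interpolation
// between detector elements k and k+1 (and, for cone beam, additionally between rows k_z and k_z+1).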
__global__ void BackprojectPixelDriven_pmatrix_device(float* sgm, float* img, float* u, float* v, float* beta, float* pmatrix, \
bool shortScan, const int N, const int V, const int S, bool coneBeam, const int M, const int imgS, float* sdd_array, float* sid_array, \
const float dx, const float dz, const float xc, const float yc, const float zc, int imgS_idx, float imgRot)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float du = u[1] - u[0];
float imgRot_in_rad = imgRot * PI / 180.0f;
if (col < M && row < M && imgS_idx < imgS)
{
float x_after_rotation = (col - (M - 1) / 2.0f)*dx + xc;
float y_after_rotation = ((M - 1) / 2.0f - row)*dx + yc;
float x = x_after_rotation * cos(imgRot_in_rad) + y_after_rotation * sin(imgRot_in_rad);//(col - (M - 1) / 2.0f)*dx + xc;
float y = y_after_rotation * cos(imgRot_in_rad) - x_after_rotation * sin(imgRot_in_rad);//((M - 1) / 2.0f - row)*dx + yc;
float z;
float U;
float w;
int k;
float w_z;//weight for cbct
int k_z;//index for cbct
float delta_beta;// delta_beta for the integral calculation (nonuniform scan angle)
float lower_row_val, upper_row_val;
for (int slice = imgS_idx; slice < imgS_idx + 1; slice++)
{
z = (slice - (float(imgS) - 1.0f) / 2.0f) * dz + zc;
// temporary local variable to speed up
float img_local = 0;
for (int view = 0; view < V; view++)
{
float sid = sid_array[view];
float sdd = sdd_array[view];
//calculation of delta_beta for the integral calculation
if (view == 0)
delta_beta = abs(beta[1] - beta[0]);
else if (view == V - 1)
delta_beta = abs(beta[view] - beta[view - 1]);
else
delta_beta = abs(beta[view + 1] - beta[view - 1]) / 2.0f;
//use pmatrix to calculate the corresponding index on the detector
int pos_in_matrix = 12 * view;
float k_u_divide_mag = pmatrix[pos_in_matrix] * x + pmatrix[pos_in_matrix + 1] * y + pmatrix[pos_in_matrix + 2] * z + pmatrix[pos_in_matrix + 3] * 1;
float one_divide_mag = pmatrix[pos_in_matrix + 8] * x + pmatrix[pos_in_matrix + 9] * y + pmatrix[pos_in_matrix + 10] * z + pmatrix[pos_in_matrix + 11] * 1;
				//the pmatrix was calibrated with the detector binned by 4 pixels,
				//so each binned element is 0.4 mm wide
				float k_f_bin_4 = k_u_divide_mag / one_divide_mag;//floating point detector index under 4-pixel binning
				float u_position_true = (k_f_bin_4 + 0.5f) * 0.4f;
				float k_f = u_position_true / du - 0.5f;
//float k_f = k_u_divide_mag / one_divide_mag;//float number of k
k = floorf(k_f);
//the pmatrix is acquired assuming beta[0]=0
//however, in a real recon, the image need to be rotated
//we need to retrieve the beta value for the pmatrix recon
//for calculation of U
float beta_pmatrix = beta[view] - beta[0];
U = sid - x * cosf(beta_pmatrix) - y * sinf(beta_pmatrix);
if (k<0 || k + 1>N - 1)
{
img_local = 0;
break;
}
w = k_f - k;
// for cone beam ct, we also need to find v0
if (coneBeam)
{
float k_z_divide_mag = pmatrix[pos_in_matrix + 4] * x + pmatrix[pos_in_matrix + 5] * y + pmatrix[pos_in_matrix + 6] * z + pmatrix[pos_in_matrix + 7] * 1;
float k_z_f = k_z_divide_mag / one_divide_mag;//float number of k_z
k_z = floorf(k_z_f);
if (k_z<0 || k_z + 1>S - 1)
{
img_local = 0;
break;
}
w_z = k_z_f - k_z;
lower_row_val = (w*sgm[view*N + k + 1 + k_z * N*V] + (1 - w)*sgm[view*N + k + k_z * N*V]);
upper_row_val = (w*sgm[view*N + k + 1 + (k_z + 1) * N*V] + (1 - w)*sgm[view*N + k + (k_z + 1) * N*V]);
img_local += sid / U / U * (w_z*upper_row_val + (1 - w_z)*lower_row_val) * delta_beta;
}
else
{
img_local += sid / U / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]) * delta_beta;
}
}
			//scale the result depending on whether the scan is a short scan or a full scan
			if (shortScan)
			{
				img[row*M + col] = img_local;
			}
			else
			{
				img[row*M + col] = img_local / 2.0f; // a full scan measures every ray twice, so halve the accumulated value
			}
}
}
}
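// The projection matrix P of each view is a 3x4 matrix stored row-major as 12 consecutive floats.
// It maps a world point to detector coordinates in homogeneous form:
//     [u*w, v*w, w]^T = P * [x, y, z, 1]^T
// so in the kernel above k_u_divide_mag / one_divide_mag recovers the (binned) detector column
// index and k_z_divide_mag / one_divide_mag recovers the detector row index for the current view.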
// backproject the image using the pixel-driven method with the Hilbert kernel (for phase contrast imaging)
// sgm: filtered sinogram data
// img: image data
// u: position of each detector element [mm]
// beta: view angle of each view [radian]
// N: number of detector elements
// V: number of views
// S: number of slices
// M: image dimension
// sdd: source to detector distance [mm]
// sid: source to isocenter distance [mm]
// du: detector element size [mm]
// dx: image pixel size [mm]
// (xc, yc): image center position [mm, mm]
// imgS_idx: index of the slice to backproject
__global__ void BackprojectPixelDrivenHilbert_device(float* sgm, float* img, float* u, float* beta, const int N, const int V, \
const int S, const int M, const float sdd, const float sid, const float du, const float dx, const float xc, const float yc, int imgS_idx)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < M && row < M && imgS_idx < S)
{
float x = (col - (M - 1) / 2.0f)*dx + xc;
float y = ((M - 1) / 2.0f - row)*dx + yc;
float U, u0;
float w;
int k;
for (int slice = imgS_idx; slice < imgS_idx + 1; slice++)
{
img[row*M + col] = 0;
for (int view = 0; view < V; view++)
{
U = sid - x * cosf(beta[view]) - y * sinf(beta[view]);
u0 = sdd * (x*sinf(beta[view]) - y * cosf(beta[view])) / U;
k = floorf((u0 - u[0]) / du);
if (k<0 || k + 1>N - 1)
{
img[row*M + col] = 0;
break;
}
w = (u0 - u[k]) / du;
img[row*M + col] += 1 / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]);
}
img[row*M + col] *= PI / V;
}
}
}
void InitializeDistance_Agent(float* &distance_array, const float distance, const int V)
{
if (distance_array != nullptr)
cudaFree(distance_array);
cudaMalloc((void**)&distance_array, V * sizeof(float));
InitDistance << <(V + 511) / 512, 512 >> > (distance_array, distance, V);
}
void InitializeNonuniformSDD_Agent(float* &distance_array, const int V, const std::string& distanceFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (distance_array != nullptr)
cudaFree(distance_array);
cudaMalloc((void**)&distance_array, V * sizeof(float));
float* distance_array_cpu = new float[V];
std::ifstream ifs(distanceFile);
if (!ifs)
{
printf("\nCannot find SDD information file '%s'!\n", distanceFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value distance_jsonc_value;
if (doc.HasMember("SourceDetectorDistance"))
{
distance_jsonc_value = doc["SourceDetectorDistance"];
}
else if(doc.HasMember("Value"))//a new version of the program uses value as member to avoid complex member names
{
distance_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find Value member in jsonc file!\n");
exit(-2);
}
if (distance_jsonc_value.Size() != V)
{
printf("\nNumber of sdd values is %d while the number of Views is %d!\n", distance_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < distance_jsonc_value.Size(); i++)
{
distance_array_cpu[i] = distance_jsonc_value[i].GetFloat();
}
	cudaMemcpy(distance_array, distance_array_cpu, V * sizeof(float), cudaMemcpyHostToDevice);
	cudaDeviceSynchronize();
	delete[] distance_array_cpu; // free the temporary host buffer
}
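// An illustrative example of the expected jsonc file (the member may be named either
// "SourceDetectorDistance" or "Value", the array length must equal the number of views, and the
// numbers below are arbitrary placeholders):
//     {
//         "Value": [1000.0, 1000.2, 999.8]   // one SDD per view [mm]
//     }
// The SID and offcenter files read by the two functions below follow the same layout.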
void InitializeNonuniformSID_Agent(float* &distance_array, const int V, const std::string& distanceFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (distance_array != nullptr)
cudaFree(distance_array);
cudaMalloc((void**)&distance_array, V * sizeof(float));
float* distance_array_cpu = new float[V];
std::ifstream ifs(distanceFile);
if (!ifs)
{
printf("\nCannot find SID information file '%s'!\n", distanceFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value distance_jsonc_value;
if (doc.HasMember("SourceIsocenterDistance"))
{
distance_jsonc_value = doc["SourceIsocenterDistance"];
}
else if (doc.HasMember("Value"))//a new version of the program uses value as member to avoid complex member names
{
distance_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find SourceIsocenterDistance or Value member in jsonc file!\n");
exit(-2);
}
if (distance_jsonc_value.Size() != V)
{
printf("\nNumber of sid values is %d while the number of Views is %d!\n", distance_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < distance_jsonc_value.Size(); i++)
{
distance_array_cpu[i] = distance_jsonc_value[i].GetFloat();
}
	cudaMemcpy(distance_array, distance_array_cpu, V * sizeof(float), cudaMemcpyHostToDevice);
	cudaDeviceSynchronize();
	delete[] distance_array_cpu; // free the temporary host buffer
}
void InitializeNonuniformOffCenter_Agent(float* &distance_array, const int V, const std::string& distanceFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (distance_array != nullptr)
cudaFree(distance_array);
cudaMalloc((void**)&distance_array, V * sizeof(float));
float* distance_array_cpu = new float[V];
std::ifstream ifs(distanceFile);
if (!ifs)
{
printf("\nCannot find Offcenter information file '%s'!\n", distanceFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value distance_jsonc_value;
if (doc.HasMember("OffcenterArray"))
{
distance_jsonc_value = doc["OffcenterArray"];
}
else if (doc.HasMember("Value"))//a new version of the program uses value as member to avoid complex member names
{
distance_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find OffcenterArray or Value member in jsonc file!\n");
exit(-2);
}
if (distance_jsonc_value.Size() != V)
{
printf("\nNumber of offcenter values is %d while the number of Views is %d!\n", distance_jsonc_value.Size(), V);
exit(-2);
}
for (unsigned i = 0; i < distance_jsonc_value.Size(); i++)
{
distance_array_cpu[i] = distance_jsonc_value[i].GetFloat();
}
	cudaMemcpy(distance_array, distance_array_cpu, V * sizeof(float), cudaMemcpyHostToDevice);
	cudaDeviceSynchronize();
	delete[] distance_array_cpu; // free the temporary host buffer
}
void InitializePMatrix_Agent(float* &pmatrix_array, const int V, const std::string& pmatrixFile)
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (pmatrix_array != nullptr)
cudaFree(pmatrix_array);
//cudaMallocManaged((void**)&pmatrix_array, 12 * V * sizeof(float));
cudaMalloc((void**)&pmatrix_array, 12 * V * sizeof(float));
//cudaMallocManaged somehow does not work for this function
//so cudaMalloc and cudaMemcpy is used
float* pmatrix_array_cpu = new float[12 * V];
std::ifstream ifs(pmatrixFile);
if (!ifs)
{
printf("\nCannot find pmatrix information file '%s'!\n", pmatrixFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value pmatrix_jsonc_value;
if (doc.HasMember("PMatrix"))
{
pmatrix_jsonc_value = doc["PMatrix"];
}
else if(doc.HasMember("Value"))
{
pmatrix_jsonc_value = doc["Value"];
}
else
{
printf("\nDid not find PMatrix or Value member in jsonc file!\n");
exit(-2);
}
if (pmatrix_jsonc_value.Size() != 12 * V)
{
printf("\nNumber of pmatrix elements is %d while the 12 times number of Views is %d!\n", pmatrix_jsonc_value.Size(), 12 * V);
exit(-2);
}
for (unsigned i = 0; i < 12 * V; i++)
{
//printf("\n%d: %f",i, pmatrix_jsonc_value[i].GetFloat());
pmatrix_array_cpu[i] = pmatrix_jsonc_value[i].GetFloat();
}
	cudaMemcpy(pmatrix_array, pmatrix_array_cpu, 12 * V * sizeof(float), cudaMemcpyHostToDevice);
	cudaDeviceSynchronize();
	delete[] pmatrix_array_cpu; // free the temporary host buffer
}
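// The pmatrix jsonc file is expected to hold 12*V floats: the 3x4 projection matrix of each view,
// flattened row by row, concatenated view after view (view 0 occupies elements 0..11, view 1
// occupies elements 12..23, and so on). The backprojection kernel reads the matrix of view v
// starting at offset 12*v.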
void InitializeU_Agent(float* &u, const int N, const float du, const float offcenter)
{
if (u != nullptr)
cudaFree(u);
cudaMalloc((void**)&u, N * sizeof(float));
InitU << <(N + 511) / 512, 512 >> > (u, N, du, offcenter);
}
void InitializeBeta_Agent(float* &beta, const int V, const float rotation, const float totalScanAngle)
{
if (beta != nullptr)
cudaFree(beta);
cudaMalloc((void**)&beta, V * sizeof(float));
InitBeta << < (V + 511) / 512, 512 >> > (beta, V, rotation, totalScanAngle);
}
void InitializeNonuniformBeta_Agent(float* &beta, const int V, const float rotation, const std::string& scanAngleFile)
//unit of beta is RADs
{
namespace fs = std::filesystem;
namespace js = rapidjson;
if (beta != nullptr)
cudaFree(beta);
cudaMalloc((void**)&beta, V * sizeof(float));
float* beta_cpu = new float[V];
std::ifstream ifs(scanAngleFile);
if (!ifs)
{
printf("Cannot find angle information file '%s'!\n", scanAngleFile.c_str());
exit(-2);
}
rapidjson::IStreamWrapper isw(ifs);
rapidjson::Document doc;
doc.ParseStream<js::kParseCommentsFlag | js::kParseTrailingCommasFlag>(isw);
js::Value scan_angle_jsonc_value;
if (doc.HasMember("ScanAngle"))
{
scan_angle_jsonc_value = doc["ScanAngle"];
}
else if (doc.HasMember("Value"))
{
scan_angle_jsonc_value = doc["Value"];
}
else
{
printf("Did not find ScanAngle or Value member in jsonc file!\n");
exit(-2);
}
if (scan_angle_jsonc_value.Size() != V)
{
printf("Number of scan angles is %d while the number of Views is %d!\n", scan_angle_jsonc_value.Size(), V);
exit(-2);
}
	for (unsigned i = 0; i < scan_angle_jsonc_value.Size(); i++)
	{
		beta_cpu[i] = rotation / 180.0f * PI + scan_angle_jsonc_value[i].GetFloat() / 180.0f * PI;
	}
	cudaMemcpy(beta, beta_cpu, sizeof(float)*V, cudaMemcpyHostToDevice);
	cudaDeviceSynchronize();
	delete[] beta_cpu; // free the temporary host buffer
}
void InitializeReconKernel_Agent(float* &reconKernel, const int N, const float du, const std::string& kernelName, const std::vector<float>& kernelParam)
{
if (reconKernel != nullptr)
cudaFree(reconKernel);
cudaMalloc((void**)&reconKernel, (2 * N - 1) * sizeof(float));
if (kernelName == "HammingFilter")
{
InitReconKernel_Hamming << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
if (kernelName == "Delta")
{
InitReconKernel_Delta << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
else if (kernelName == "QuadraticFilter")
{
float lastParam = 0.0f;
if (kernelParam.size() == 3)
lastParam = kernelParam[2];
InitReconKernel_Quadratic << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, int(kernelParam.size()), kernelParam[0], kernelParam[1], lastParam);
}
else if (kernelName == "Polynomial")
{
// TODO:
// InitReconKernel_Polynomial <<<...>>> (...);
float p[7] = { 0 };
for (size_t i = 0; i < kernelParam.size(); i++)
{
p[i] = kernelParam[kernelParam.size() - 1 - i];
}
//InitReconKernel_Polynomial <<<(2 * N - 1 + 511) / 512, 512>>> (reconKernel, N, du, p[0], p[1], p[2], p[3], p[4], p[5], p[6]);
InitReconKernel_Polynomial << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, p[6], p[5], p[4], p[3], p[2], p[1], p[0]);
}
else if (kernelName == "Hilbert" || kernelName == "Hilbert_angle")
{
InitReconKernel_Hilbert << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
else if (kernelName == "GaussianApodizedRamp")
{
InitReconKernel_GaussianApodized << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]);
}
else if (kernelName == "None")
{
// Do not need to do anything
}
}
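// Typical host-side usage (sketch; "reconKernel" starts as nullptr and the filter parameter
// value shown here is an arbitrary placeholder taken from the config in the real program):
//     float* reconKernel = nullptr;
//     std::vector<float> kernelParam = { 0.5f };
//     InitializeReconKernel_Agent(reconKernel, config.sgmWidth, config.detEltSize, "HammingFilter", kernelParam);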
void MallocManaged_Agent(float * &p, const int size)
{
cudaMallocManaged((void**)&p, size);
}
void CorrectBeamHardening_Agent(float* sgm, mango::Config & config)
{
dim3 grid((config.sgmWidth + 15) / 16, (config.sgmHeight + 15) / 16);
dim3 block(16, 16);
CorrectBeamHardening_device << <grid, block >> > (sgm, config.sgmWidth, config.sgmHeight, config.sliceCount, config.beamHardening[0], config.beamHardening[1], config.beamHardening[2], config.beamHardening[3], config.beamHardening[4], config.beamHardening[5], config.beamHardening[6], config.beamHardening[7], config.beamHardening[8], config.beamHardening[9]);
cudaDeviceSynchronize();
}
void FilterSinogram_Agent(float * sgm, float* sgm_flt, float* reconKernel, float* u, mango::Config & config, float* beta, float * sdd_array, float * offcenter_array)
{
// Step 1: weight the sinogram
dim3 grid((config.sgmWidth + 15) / 16, (config.sgmHeight + 15) / 16);
dim3 block(16, 16);
// Hilbert kernel for phase contrast imaging
if (config.kernelName == "Hilbert")
WeightSinogramHilbert_device << <grid, block >> > (sgm, u, config.sgmWidth, config.sgmHeight, config.sliceCount, config.sdd);
else if (config.kernelName == "Hilbert_angle")
{
printf("Kernel name: %s\n", config.kernelName);
WeightSinogramHilbert_angle_device << <grid, block >> > (sgm, u, config.sgmWidth, config.sgmHeight, config.sliceCount, config.sdd);
}
else if (config.kernelName == "None")
{
// Do not weight the sinogram(sgm)
}
// Common attenuation imaging
else
WeightSinogram_device << <grid, block >> > (sgm, u, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, \
config.sliceThickness, config.sliceOffcenter, sdd_array, config.totalScanAngle, config.shortScan, beta, offcenter_array);
cudaDeviceSynchronize();
// Step 2: convolve the sinogram
if (config.kernelName == "GaussianApodizedRamp")
{
		// if the Gaussian apodized kernel is used, the sinogram needs to be filtered twice:
		// first by the ramp filter, then by the Gaussian filter
float du = config.detEltSize;
float * reconKernel_ramp;
cudaMalloc((void**)&reconKernel_ramp, (2 * config.sgmWidth - 1) * sizeof(float));
InitReconKernel_Hamming << <(2 * config.sgmWidth - 1 + 511) / 512, 512 >> > (reconKernel_ramp, config.sgmWidth, du, 1);
cudaDeviceSynchronize();
		//the intermediate filtering result is saved in sgm_flt_ramp
float *sgm_flt_ramp;
//MallocManaged_Agent(sgm_flt_ramp, config.sgmWidth*config.views*config.sliceCount * sizeof(float));
cudaMalloc((void**)& sgm_flt_ramp, config.sgmWidth * config.views * config.sliceCount * sizeof(float));
ConvolveSinogram_device << <grid, block >> > (sgm_flt_ramp, sgm, reconKernel_ramp, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, u, config.detEltSize);
cudaDeviceSynchronize();
//the height of the filtered sinogram shrinks to number of views, so the convolution parameters need to be adjusted accordingly
ConvolveSinogram_device << <grid, block >> > (sgm_flt, sgm_flt_ramp, reconKernel, config.sgmWidth, config.views, config.views, config.sliceCount, u, config.detEltSize);
cudaDeviceSynchronize();
// free temporary memory
cudaFree(reconKernel_ramp);
cudaFree(sgm_flt_ramp);
}
else if (config.kernelName == "None")
{
		// Do not perform convolution, just directly copy the data
CopySinogram_device <<<grid, block >>> (sgm_flt, sgm, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount);
cudaDeviceSynchronize();
}
else
{
ConvolveSinogram_device << <grid, block >> > (sgm_flt, sgm, reconKernel, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, u, config.detEltSize);
cudaDeviceSynchronize();
}
}
void BackprojectPixelDriven_Agent(float * sgm_flt, float * img, float* sdd_array, float* sid_array, float* offcenter_array, float* pmatrix_array, float * u, float *v, float* beta, mango::Config & config, int z_idx)
{
dim3 grid((config.imgDim + 15) / 16, (config.imgDim + 15) / 16);
dim3 block(16, 16);
// Hilbert kernel for phase contrast imaging
if (config.kernelName == "Hilbert" || config.kernelName == "Hilbert_angle")
{
BackprojectPixelDrivenHilbert_device << <grid, block >> > (sgm_flt, img, u, beta, config.sgmWidth, config.views, \
config.sliceCount, config.imgDim, config.sdd, config.sid, config.detEltSize, config.pixelSize, config.xCenter, config.yCenter, z_idx);
}
// Common attenuation imaging
else if (config.pmatrixFlag == false)// if pmatrix is not applied
{
BackprojectPixelDriven_device << <grid, block >> > (sgm_flt, img, u, v, beta, config.shortScan, config.sgmWidth, config.views, \
config.sliceCount, config.coneBeam, config.imgDim, config.imgSliceCount, sdd_array, sid_array, offcenter_array, config.pixelSize, config.imgSliceThickness, \
config.xCenter, config.yCenter, config.zCenter, z_idx);
}
else if (config.pmatrixFlag == true)// if pmatrix is applied
{
BackprojectPixelDriven_pmatrix_device << <grid, block >> > (sgm_flt, img, u, v, beta, pmatrix_array, config.shortScan, config.sgmWidth, config.views, \
config.sliceCount, config.coneBeam, config.imgDim, config.imgSliceCount, sdd_array, sid_array, config.pixelSize, config.imgSliceThickness, \
config.xCenter, config.yCenter, config.zCenter, z_idx, config.imgRot);
}
cudaDeviceSynchronize();
}
void SaveReconImageSlice(const char* filename, float* rec_image, int z_idx, const mango::Config& config)
{
FILE* fp = NULL;
if (z_idx == 0)
fp = fopen(filename, "wb");
else
fp = fopen(filename, "ab");
if (fp == NULL)
{
fprintf(stderr, "Cannot save to file %s!\n", filename);
exit(4);
}
fwrite(rec_image, sizeof(float), config.imgDim*config.imgDim, fp);
fclose(fp);
}
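// For context, a typical host-side driver built on the agents in this file looks roughly like the
// sketch below ("outputFile" and the buffer names are hypothetical; the real driver lives elsewhere):
//     FilterSinogram_Agent(sgm, sgm_flt, reconKernel, u, config, beta, sdd_array, offcenter_array);
//     for (int z = 0; z < config.imgSliceCount; z++)
//     {
//         BackprojectPixelDriven_Agent(sgm_flt, img, sdd_array, sid_array, offcenter_array, pmatrix_array, u, v, beta, config, z);
//         SaveReconImageSlice(outputFile, img, z, config);
//     }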
void FreeMemory_Agent(float* &p)
{
cudaFree(p);
p = nullptr;
}